generated: hack/update-vendor.sh
3
vendor/golang.org/x/crypto/chacha20/chacha_arm64.go
generated
vendored
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.11
// +build !gccgo,!appengine
// +build go1.11,!gccgo,!purego

package chacha20
3
vendor/golang.org/x/crypto/chacha20/chacha_arm64.s
generated
vendored
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.11
// +build !gccgo,!appengine
// +build go1.11,!gccgo,!purego

#include "textflag.h"
140
vendor/golang.org/x/crypto/chacha20/chacha_generic.go
generated
vendored
@@ -42,10 +42,14 @@ type Cipher struct {

// The last len bytes of buf are leftover key stream bytes from the previous
// XORKeyStream invocation. The size of buf depends on how many blocks are
// computed at a time.
// computed at a time by xorKeyStreamBlocks.
buf [bufSize]byte
len int

// overflow is set when the counter overflowed, no more blocks can be
// generated, and the next XORKeyStream call should panic.
overflow bool

// The counter-independent results of the first round are cached after they
// are computed the first time.
precompDone bool
@@ -89,6 +93,7 @@ func newUnauthenticatedCipher(c *Cipher, key, nonce []byte) (*Cipher, error) {
return nil, errors.New("chacha20: wrong nonce size")
}

key, nonce = key[:KeySize], nonce[:NonceSize] // bounds check elimination hint
c.key = [8]uint32{
binary.LittleEndian.Uint32(key[0:4]),
binary.LittleEndian.Uint32(key[4:8]),
@@ -136,6 +141,36 @@ func quarterRound(a, b, c, d uint32) (uint32, uint32, uint32, uint32) {
return a, b, c, d
}

// SetCounter sets the Cipher counter. The next invocation of XORKeyStream will
// behave as if (64 * counter) bytes had been encrypted so far.
//
// To prevent accidental counter reuse, SetCounter panics if counter is less
// than the current value.
//
// Note that the execution time of XORKeyStream is not independent of the
// counter value.
func (s *Cipher) SetCounter(counter uint32) {
// Internally, s may buffer multiple blocks, which complicates this
// implementation slightly. When checking whether the counter has rolled
// back, we must use both s.counter and s.len to determine how many blocks
// we have already output.
outputCounter := s.counter - uint32(s.len)/blockSize
if s.overflow || counter < outputCounter {
panic("chacha20: SetCounter attempted to rollback counter")
}

// In the general case, we set the new counter value and reset s.len to 0,
// causing the next call to XORKeyStream to refill the buffer. However, if
// we're advancing within the existing buffer, we can save work by simply
// setting s.len.
if counter < s.counter {
s.len = int(s.counter-counter) * blockSize
} else {
s.counter = counter
s.len = 0
}
}

// XORKeyStream XORs each byte in the given slice with a byte from the
// cipher's key stream. Dst and src must overlap entirely or not at all.
//
@@ -169,34 +204,52 @@ func (s *Cipher) XORKeyStream(dst, src []byte) {
dst[i] = src[i] ^ b
}
s.len -= len(keyStream)
src = src[len(keyStream):]
dst = dst[len(keyStream):]
dst, src = dst[len(keyStream):], src[len(keyStream):]
}
if len(src) == 0 {
return
}

const blocksPerBuf = bufSize / blockSize
numBufs := (uint64(len(src)) + bufSize - 1) / bufSize
if uint64(s.counter)+numBufs*blocksPerBuf >= 1<<32 {
// If we'd need to let the counter overflow and keep generating output,
// panic immediately. If instead we'd only reach the last block, remember
// not to generate any more output after the buffer is drained.
numBlocks := (uint64(len(src)) + blockSize - 1) / blockSize
if s.overflow || uint64(s.counter)+numBlocks > 1<<32 {
panic("chacha20: counter overflow")
} else if uint64(s.counter)+numBlocks == 1<<32 {
s.overflow = true
}

// xorKeyStreamBlocks implementations expect input lengths that are a
// multiple of bufSize. Platform-specific ones process multiple blocks at a
// time, so have bufSizes that are a multiple of blockSize.

rem := len(src) % bufSize
full := len(src) - rem

full := len(src) - len(src)%bufSize
if full > 0 {
s.xorKeyStreamBlocks(dst[:full], src[:full])
}
dst, src = dst[full:], src[full:]

// If using a multi-block xorKeyStreamBlocks would overflow, use the generic
// one that does one block at a time.
const blocksPerBuf = bufSize / blockSize
if uint64(s.counter)+blocksPerBuf > 1<<32 {
s.buf = [bufSize]byte{}
numBlocks := (len(src) + blockSize - 1) / blockSize
buf := s.buf[bufSize-numBlocks*blockSize:]
copy(buf, src)
s.xorKeyStreamBlocksGeneric(buf, buf)
s.len = len(buf) - copy(dst, buf)
return
}

// If we have a partial (multi-)block, pad it for xorKeyStreamBlocks, and
// keep the leftover keystream for the next XORKeyStream invocation.
if rem > 0 {
if len(src) > 0 {
s.buf = [bufSize]byte{}
copy(s.buf[:], src[full:])
copy(s.buf[:], src)
s.xorKeyStreamBlocks(s.buf[:], s.buf[:])
s.len = bufSize - copy(dst[full:], s.buf[:])
s.len = bufSize - copy(dst, s.buf[:])
}
}
@@ -233,7 +286,9 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
s.precompDone = true
}

for i := 0; i < len(src); i += blockSize {
// A condition of len(src) > 0 would be sufficient, but this also
// acts as a bounds check elimination hint.
for len(src) >= 64 && len(dst) >= 64 {
// The remainder of the first column round.
fcr0, fcr4, fcr8, fcr12 := quarterRound(c0, c4, c8, s.counter)
@@ -258,49 +313,28 @@ func (s *Cipher) xorKeyStreamBlocksGeneric(dst, src []byte) {
x3, x4, x9, x14 = quarterRound(x3, x4, x9, x14)
}

// Finally, add back the initial state to generate the key stream.
x0 += c0
x1 += c1
x2 += c2
x3 += c3
x4 += c4
x5 += c5
x6 += c6
x7 += c7
x8 += c8
x9 += c9
x10 += c10
x11 += c11
x12 += s.counter
x13 += c13
x14 += c14
x15 += c15
// Add back the initial state to generate the key stream, then
// XOR the key stream with the source and write out the result.
addXor(dst[0:4], src[0:4], x0, c0)
addXor(dst[4:8], src[4:8], x1, c1)
addXor(dst[8:12], src[8:12], x2, c2)
addXor(dst[12:16], src[12:16], x3, c3)
addXor(dst[16:20], src[16:20], x4, c4)
addXor(dst[20:24], src[20:24], x5, c5)
addXor(dst[24:28], src[24:28], x6, c6)
addXor(dst[28:32], src[28:32], x7, c7)
addXor(dst[32:36], src[32:36], x8, c8)
addXor(dst[36:40], src[36:40], x9, c9)
addXor(dst[40:44], src[40:44], x10, c10)
addXor(dst[44:48], src[44:48], x11, c11)
addXor(dst[48:52], src[48:52], x12, s.counter)
addXor(dst[52:56], src[52:56], x13, c13)
addXor(dst[56:60], src[56:60], x14, c14)
addXor(dst[60:64], src[60:64], x15, c15)

s.counter += 1
if s.counter == 0 {
panic("chacha20: internal error: counter overflow")
}

in, out := src[i:], dst[i:]
in, out = in[:blockSize], out[:blockSize] // bounds check elimination hint

// XOR the key stream with the source and write out the result.
xor(out[0:], in[0:], x0)
xor(out[4:], in[4:], x1)
xor(out[8:], in[8:], x2)
xor(out[12:], in[12:], x3)
xor(out[16:], in[16:], x4)
xor(out[20:], in[20:], x5)
xor(out[24:], in[24:], x6)
xor(out[28:], in[28:], x7)
xor(out[32:], in[32:], x8)
xor(out[36:], in[36:], x9)
xor(out[40:], in[40:], x10)
xor(out[44:], in[44:], x11)
xor(out[48:], in[48:], x12)
xor(out[52:], in[52:], x13)
xor(out[56:], in[56:], x14)
xor(out[60:], in[60:], x15)
src, dst = src[blockSize:], dst[blockSize:]
}
}
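The SetCounter and XORKeyStream semantics introduced above can be exercised end to end. The following is a hedged sketch, not part of the vendored change; it assumes the exported NewUnauthenticatedCipher constructor and the KeySize/NonceSize constants that accompany this version of golang.org/x/crypto/chacha20.

```go
// Sketch: SetCounter(n) makes the next XORKeyStream behave as if 64*n bytes
// had already been processed, so seeking to block 1 reproduces the second
// 64-byte block of a contiguous encryption.
package main

import (
	"bytes"
	"fmt"

	"golang.org/x/crypto/chacha20"
)

func main() {
	key := make([]byte, chacha20.KeySize)     // all-zero key, demo only
	nonce := make([]byte, chacha20.NonceSize) // all-zero nonce, demo only

	// Encrypt 128 bytes (two 64-byte blocks) in one shot.
	plaintext := make([]byte, 128)
	full := make([]byte, 128)
	c1, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
	c1.XORKeyStream(full, plaintext)

	// Reproduce only the second block by seeking with SetCounter(1).
	second := make([]byte, 64)
	c2, _ := chacha20.NewUnauthenticatedCipher(key, nonce)
	c2.SetCounter(1)
	c2.XORKeyStream(second, plaintext[64:])

	fmt.Println(bytes.Equal(full[64:], second)) // true
}
```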
2
vendor/golang.org/x/crypto/chacha20/chacha_noasm.go
generated
vendored
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo appengine
// +build !arm64,!s390x,!ppc64le arm64,!go1.11 gccgo purego

package chacha20
2
vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.go
generated
vendored
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo,!appengine
// +build !gccgo,!purego

package chacha20
2
vendor/golang.org/x/crypto/chacha20/chacha_ppc64le.s
generated
vendored
@@ -19,7 +19,7 @@
// The differences in this and the original implementation are
// due to the calling conventions and initialization of constants.

// +build !gccgo,!appengine
// +build !gccgo,!purego

#include "textflag.h"
2
vendor/golang.org/x/crypto/chacha20/chacha_s390x.go
generated
vendored
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo,!appengine
// +build !gccgo,!purego

package chacha20
2
vendor/golang.org/x/crypto/chacha20/chacha_s390x.s
generated
vendored
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !gccgo,!appengine
// +build !gccgo,!purego

#include "go_asm.h"
#include "textflag.h"
17
vendor/golang.org/x/crypto/chacha20/xor.go
generated
vendored
@@ -13,10 +13,10 @@ const unaligned = runtime.GOARCH == "386" ||
runtime.GOARCH == "ppc64le" ||
runtime.GOARCH == "s390x"

// xor reads a little endian uint32 from src, XORs it with u and
// addXor reads a little endian uint32 from src, XORs it with (a + b) and
// places the result in little endian byte order in dst.
func xor(dst, src []byte, u uint32) {
_, _ = src[3], dst[3] // eliminate bounds checks
func addXor(dst, src []byte, a, b uint32) {
_, _ = src[3], dst[3] // bounds check elimination hint
if unaligned {
// The compiler should optimize this code into
// 32-bit unaligned little endian loads and stores.
@@ -27,15 +27,16 @@ func xor(dst, src []byte, u uint32) {
v |= uint32(src[1]) << 8
v |= uint32(src[2]) << 16
v |= uint32(src[3]) << 24
v ^= u
v ^= a + b
dst[0] = byte(v)
dst[1] = byte(v >> 8)
dst[2] = byte(v >> 16)
dst[3] = byte(v >> 24)
} else {
dst[0] = src[0] ^ byte(u)
dst[1] = src[1] ^ byte(u>>8)
dst[2] = src[2] ^ byte(u>>16)
dst[3] = src[3] ^ byte(u>>24)
a += b
dst[0] = src[0] ^ byte(a)
dst[1] = src[1] ^ byte(a>>8)
dst[2] = src[2] ^ byte(a>>16)
dst[3] = src[3] ^ byte(a>>24)
}
}
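As a sketch (not part of the diff), the observable behaviour of the new addXor helper can be written with encoding/binary; the vendored version above works byte by byte or through unaligned 32-bit accesses depending on GOARCH, but the result is the same.

```go
// Sketch: dst receives the little-endian uint32 from src XORed with (a + b).
package main

import (
	"encoding/binary"
	"fmt"
)

func addXor(dst, src []byte, a, b uint32) {
	v := binary.LittleEndian.Uint32(src)
	binary.LittleEndian.PutUint32(dst, v^(a+b))
}

func main() {
	src := []byte{1, 2, 3, 4} // 0x04030201 little-endian
	dst := make([]byte, 4)
	addXor(dst, src, 0x01000000, 0x02000000)
	fmt.Printf("%x\n", dst) // 01020307, i.e. 0x04030201 ^ 0x03000000
}
```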
6
vendor/golang.org/x/crypto/cryptobyte/asn1.go
generated
vendored
@@ -81,7 +81,7 @@ func (b *Builder) AddASN1BigInt(n *big.Int) {
for i := range bytes {
bytes[i] ^= 0xff
}
if bytes[0]&0x80 == 0 {
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
c.add(0xff)
}
c.add(bytes...)
@@ -230,12 +230,12 @@ func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {

// String

// ReadASN1Boolean decodes an ASN.1 INTEGER and converts it to a boolean
// ReadASN1Boolean decodes an ASN.1 BOOLEAN and converts it to a boolean
// representation into out and advances. It reports whether the read
// was successful.
func (s *String) ReadASN1Boolean(out *bool) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || len(bytes) != 1 {
if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 {
return false
}
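A brief sketch of the case the corrected BOOLEAN tag handles, using the package's public Builder and String types; AddASN1Boolean is assumed to be the matching encode-side helper in this version of cryptobyte.

```go
// Sketch: round-trip an ASN.1 BOOLEAN through cryptobyte.
package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddASN1Boolean(true)
	der, err := b.Bytes()
	if err != nil {
		panic(err)
	}

	var out bool
	s := cryptobyte.String(der)
	if !s.ReadASN1Boolean(&out) {
		panic("cryptobyte: malformed BOOLEAN")
	}
	fmt.Println(out) // true
}
```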
2
vendor/golang.org/x/crypto/poly1305/BUILD
generated
vendored
@@ -10,12 +10,10 @@ go_library(
"sum_amd64.go",
"sum_amd64.s",
"sum_generic.go",
"sum_noasm.go",
"sum_ppc64le.go",
"sum_ppc64le.s",
"sum_s390x.go",
"sum_s390x.s",
"sum_vmsl_s390x.s",
],
importmap = "k8s.io/kubernetes/vendor/golang.org/x/crypto/poly1305",
importpath = "golang.org/x/crypto/poly1305",
4
vendor/golang.org/x/crypto/poly1305/mac_noasm.go
generated
vendored
@@ -2,10 +2,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64,!ppc64le gccgo appengine
// +build !amd64,!ppc64le,!s390x gccgo purego

package poly1305

type mac struct{ macGeneric }

func newMAC(key *[32]byte) mac { return mac{newMACGeneric(key)} }
26
vendor/golang.org/x/crypto/poly1305/poly1305.go
generated
vendored
@@ -26,7 +26,9 @@ const TagSize = 16
// 16-byte result into out. Authenticating two different messages with the same
// key allows an attacker to forge messages at will.
func Sum(out *[16]byte, m []byte, key *[32]byte) {
sum(out, m, key)
h := New(key)
h.Write(m)
h.Sum(out[:0])
}

// Verify returns true if mac is a valid authenticator for m with the given key.
@@ -46,10 +48,9 @@ func Verify(mac *[16]byte, m []byte, key *[32]byte) bool {
// two different messages with the same key allows an attacker
// to forge messages at will.
func New(key *[32]byte) *MAC {
return &MAC{
mac: newMAC(key),
finalized: false,
}
m := &MAC{}
initialize(key, &m.macState)
return m
}

// MAC is an io.Writer computing an authentication tag
@@ -58,7 +59,7 @@ func New(key *[32]byte) *MAC {
// MAC cannot be used like common hash.Hash implementations,
// because using a poly1305 key twice breaks its security.
// Therefore writing data to a running MAC after calling
// Sum causes it to panic.
// Sum or Verify causes it to panic.
type MAC struct {
mac // platform-dependent implementation

@@ -71,10 +72,10 @@ func (h *MAC) Size() int { return TagSize }
// Write adds more data to the running message authentication code.
// It never returns an error.
//
// It must not be called after the first call of Sum.
// It must not be called after the first call of Sum or Verify.
func (h *MAC) Write(p []byte) (n int, err error) {
if h.finalized {
panic("poly1305: write to MAC after Sum")
panic("poly1305: write to MAC after Sum or Verify")
}
return h.mac.Write(p)
}
@@ -87,3 +88,12 @@ func (h *MAC) Sum(b []byte) []byte {
h.finalized = true
return append(b, mac[:]...)
}

// Verify returns whether the authenticator of all data written to
// the message authentication code matches the expected value.
func (h *MAC) Verify(expected []byte) bool {
var mac [TagSize]byte
h.mac.Sum(&mac)
h.finalized = true
return subtle.ConstantTimeCompare(expected, mac[:]) == 1
}
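The reworked MAC type above, including the newly added Verify method, is used roughly as follows. This is an illustrative sketch against the public golang.org/x/crypto/poly1305 API, not code from the commit.

```go
// Sketch: incremental tagging with New/Write/Sum, verification with the new
// (*MAC).Verify, and a cross-check against the one-shot Sum helper.
package main

import (
	"fmt"

	"golang.org/x/crypto/poly1305"
)

func main() {
	var key [32]byte // must be unique per message in real use
	msg := []byte("hello poly1305")

	h := poly1305.New(&key)
	h.Write(msg)
	tag := h.Sum(nil)

	// Verify with a fresh MAC over the same data.
	h2 := poly1305.New(&key)
	h2.Write(msg)
	fmt.Println(h2.Verify(tag)) // true

	// Cross-check against the one-shot helper.
	var out [16]byte
	poly1305.Sum(&out, msg, &key)
	fmt.Println(string(tag) == string(out[:])) // true
}
```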
13
vendor/golang.org/x/crypto/poly1305/sum_amd64.go
generated
vendored
@@ -2,24 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64,!gccgo,!appengine
// +build !gccgo,!purego

package poly1305

//go:noescape
func update(state *macState, msg []byte)

func sum(out *[16]byte, m []byte, key *[32]byte) {
h := newMAC(key)
h.Write(m)
h.Sum(out)
}

func newMAC(key *[32]byte) (h mac) {
initialize(key, &h.r, &h.s)
return
}

// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
2
vendor/golang.org/x/crypto/poly1305/sum_amd64.s
generated
vendored
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build amd64,!gccgo,!appengine
// +build !gccgo,!purego

#include "textflag.h"
21
vendor/golang.org/x/crypto/poly1305/sum_generic.go
generated
vendored
@@ -31,16 +31,18 @@ func sumGeneric(out *[TagSize]byte, msg []byte, key *[32]byte) {
h.Sum(out)
}

func newMACGeneric(key *[32]byte) (h macGeneric) {
initialize(key, &h.r, &h.s)
return
func newMACGeneric(key *[32]byte) macGeneric {
m := macGeneric{}
initialize(key, &m.macState)
return m
}

// macState holds numbers in saturated 64-bit little-endian limbs. That is,
// the value of [x0, x1, x2] is x[0] + x[1] * 2⁶⁴ + x[2] * 2¹²⁸.
type macState struct {
// h is the main accumulator. It is to be interpreted modulo 2¹³⁰ - 5, but
// can grow larger during and after rounds.
// can grow larger during and after rounds. It must, however, remain below
// 2 * (2¹³⁰ - 5).
h [3]uint64
// r and s are the private key components.
r [2]uint64
@@ -97,11 +99,12 @@ const (
rMask1 = 0x0FFFFFFC0FFFFFFC
)

func initialize(key *[32]byte, r, s *[2]uint64) {
r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
s[0] = binary.LittleEndian.Uint64(key[16:24])
s[1] = binary.LittleEndian.Uint64(key[24:32])
// initialize loads the 256-bit key into the two 128-bit secret values r and s.
func initialize(key *[32]byte, m *macState) {
m.r[0] = binary.LittleEndian.Uint64(key[0:8]) & rMask0
m.r[1] = binary.LittleEndian.Uint64(key[8:16]) & rMask1
m.s[0] = binary.LittleEndian.Uint64(key[16:24])
m.s[1] = binary.LittleEndian.Uint64(key[24:32])
}

// uint128 holds a 128-bit number as two 64-bit limbs, for use with the
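For reference, initialize performs the standard Poly1305 clamp of r on two 64-bit limbs. The sketch below is not from the commit; rMask0 is not visible in this hunk, so the value used here is the conventional RFC 8439 clamp mask and should be read as an assumption.

```go
// Sketch: clamping r and splitting the 256-bit key into r and s, mirroring
// what initialize does on 64-bit little-endian limbs.
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	rMask0 = 0x0FFFFFFC0FFFFFFF // assumed value of the low-limb mask
	rMask1 = 0x0FFFFFFC0FFFFFFC // value visible in the hunk above
)

func main() {
	var key [32]byte
	for i := range key {
		key[i] = 0xff
	}

	r0 := binary.LittleEndian.Uint64(key[0:8]) & rMask0
	r1 := binary.LittleEndian.Uint64(key[8:16]) & rMask1
	s0 := binary.LittleEndian.Uint64(key[16:24])
	s1 := binary.LittleEndian.Uint64(key[24:32])

	// The clamp clears the top 4 bits of r's bytes 3, 7, 11, 15 and the low
	// 2 bits of bytes 4, 8, 12, as described in RFC 8439 section 2.5.
	fmt.Printf("r = %016x %016x\ns = %016x %016x\n", r1, r0, s1, s0)
}
```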
13
vendor/golang.org/x/crypto/poly1305/sum_noasm.go
generated
vendored
@@ -1,13 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x,!go1.11 !amd64,!s390x,!ppc64le gccgo appengine nacl

package poly1305

func sum(out *[TagSize]byte, msg []byte, key *[32]byte) {
h := newMAC(key)
h.Write(msg)
h.Sum(out)
}
13
vendor/golang.org/x/crypto/poly1305/sum_ppc64le.go
generated
vendored
@@ -2,24 +2,13 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ppc64le,!gccgo,!appengine
// +build !gccgo,!purego

package poly1305

//go:noescape
func update(state *macState, msg []byte)

func sum(out *[16]byte, m []byte, key *[32]byte) {
h := newMAC(key)
h.Write(m)
h.Sum(out)
}

func newMAC(key *[32]byte) (h mac) {
initialize(key, &h.r, &h.s)
return
}

// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
2
vendor/golang.org/x/crypto/poly1305/sum_ppc64le.s
generated
vendored
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ppc64le,!gccgo,!appengine
// +build !gccgo,!purego

#include "textflag.h"
80
vendor/golang.org/x/crypto/poly1305/sum_s390x.go
generated
vendored
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build s390x,go1.11,!gccgo,!appengine
// +build !gccgo,!purego

package poly1305

@@ -10,30 +10,66 @@ import (
"golang.org/x/sys/cpu"
)

// poly1305vx is an assembly implementation of Poly1305 that uses vector
// updateVX is an assembly implementation of Poly1305 that uses vector
// instructions. It must only be called if the vector facility (vx) is
// available.
//go:noescape
func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
func updateVX(state *macState, msg []byte)

// poly1305vmsl is an assembly implementation of Poly1305 that uses vector
// instructions, including VMSL. It must only be called if the vector facility (vx) is
// available and if VMSL is supported.
//go:noescape
func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]byte)
// mac is a replacement for macGeneric that uses a larger buffer and redirects
// calls that would have gone to updateGeneric to updateVX if the vector
// facility is installed.
//
// A larger buffer is required for good performance because the vector
// implementation has a higher fixed cost per call than the generic
// implementation.
type mac struct {
macState

func sum(out *[16]byte, m []byte, key *[32]byte) {
if cpu.S390X.HasVX {
var mPtr *byte
if len(m) > 0 {
mPtr = &m[0]
}
if cpu.S390X.HasVXE && len(m) > 256 {
poly1305vmsl(out, mPtr, uint64(len(m)), key)
} else {
poly1305vx(out, mPtr, uint64(len(m)), key)
}
} else {
sumGeneric(out, m, key)
}
buffer [16 * TagSize]byte // size must be a multiple of block size (16)
offset int
}

func (h *mac) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
n := copy(h.buffer[h.offset:], p)
if h.offset+n < len(h.buffer) {
h.offset += n
return nn, nil
}
p = p[n:]
h.offset = 0
if cpu.S390X.HasVX {
updateVX(&h.macState, h.buffer[:])
} else {
updateGeneric(&h.macState, h.buffer[:])
}
}

tail := len(p) % len(h.buffer) // number of bytes to copy into buffer
body := len(p) - tail // number of bytes to process now
if body > 0 {
if cpu.S390X.HasVX {
updateVX(&h.macState, p[:body])
} else {
updateGeneric(&h.macState, p[:body])
}
}
h.offset = copy(h.buffer[:], p[body:]) // copy tail bytes - can be 0
return nn, nil
}

func (h *mac) Sum(out *[TagSize]byte) {
state := h.macState
remainder := h.buffer[:h.offset]

// Use the generic implementation if we have 2 or fewer blocks left
// to sum. The vector implementation has a higher startup time.
if cpu.S390X.HasVX && len(remainder) > 2*TagSize {
updateVX(&state, remainder)
} else if len(remainder) > 0 {
updateGeneric(&state, remainder)
}
finalize(out, &state.h, &state.s)
}
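The Write method above follows a common buffered-block pattern: drain the partially filled buffer first, hand whole multiples of the buffer to the block routine directly, and keep the tail for the next Write or for Sum. Below is a reduced, self-contained sketch of that pattern with a toy block processor standing in for updateVX/updateGeneric; it is an illustration, not the vendored code.

```go
// Sketch: buffered Write that only ever feeds the block processor whole
// multiples of the block size, keeping any tail for later.
package main

import "fmt"

const blockSize = 16

type buffered struct {
	buf    [16 * blockSize]byte
	offset int
	blocks int // stands in for the real accumulator update
}

func (b *buffered) process(p []byte) { b.blocks += len(p) / blockSize }

func (b *buffered) Write(p []byte) (int, error) {
	n := len(p)
	if b.offset > 0 {
		c := copy(b.buf[b.offset:], p)
		if b.offset+c < len(b.buf) {
			b.offset += c
			return n, nil // buffer still not full, nothing to process yet
		}
		p = p[c:]
		b.offset = 0
		b.process(b.buf[:]) // flush the now-full buffer
	}
	tail := len(p) % len(b.buf)
	if body := len(p) - tail; body > 0 {
		b.process(p[:body]) // process whole buffer-sized chunks directly
	}
	b.offset = copy(b.buf[:], p[len(p)-tail:]) // keep the tail, may be 0 bytes
	return n, nil
}

func main() {
	var b buffered
	b.Write(make([]byte, 300))
	b.Write(make([]byte, 300))
	fmt.Println(b.blocks, b.offset) // blocks processed so far, buffered tail
}
```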
633
vendor/golang.org/x/crypto/poly1305/sum_s390x.s
generated
vendored
@@ -2,115 +2,187 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x,go1.11,!gccgo,!appengine
|
||||
// +build !gccgo,!purego
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Implementation of Poly1305 using the vector facility (vx).
|
||||
// This implementation of Poly1305 uses the vector facility (vx)
|
||||
// to process up to 2 blocks (32 bytes) per iteration using an
|
||||
// algorithm based on the one described in:
|
||||
//
|
||||
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
|
||||
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
|
||||
//
|
||||
// This algorithm uses 5 26-bit limbs to represent a 130-bit
|
||||
// value. These limbs are, for the most part, zero extended and
|
||||
// placed into 64-bit vector register elements. Each vector
|
||||
// register is 128-bits wide and so holds 2 of these elements.
|
||||
// Using 26-bit limbs allows us plenty of headroom to accomodate
|
||||
// accumulations before and after multiplication without
|
||||
// overflowing either 32-bits (before multiplication) or 64-bits
|
||||
// (after multiplication).
|
||||
//
|
||||
// In order to parallelise the operations required to calculate
|
||||
// the sum we use two separate accumulators and then sum those
|
||||
// in an extra final step. For compatibility with the generic
|
||||
// implementation we perform this summation at the end of every
|
||||
// updateVX call.
|
||||
//
|
||||
// To use two accumulators we must multiply the message blocks
|
||||
// by r² rather than r. Only the final message block should be
|
||||
// multiplied by r.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// We want to calculate the sum (h) for a 64 byte message (m):
|
||||
//
|
||||
// h = m[0:16]r⁴ + m[16:32]r³ + m[32:48]r² + m[48:64]r
|
||||
//
|
||||
// To do this we split the calculation into the even indices
|
||||
// and odd indices of the message. These form our SIMD 'lanes':
|
||||
//
|
||||
// h = m[ 0:16]r⁴ + m[32:48]r² + <- lane 0
|
||||
// m[16:32]r³ + m[48:64]r <- lane 1
|
||||
//
|
||||
// To calculate this iteratively we refactor so that both lanes
|
||||
// are written in terms of r² and r:
|
||||
//
|
||||
// h = (m[ 0:16]r² + m[32:48])r² + <- lane 0
|
||||
// (m[16:32]r² + m[48:64])r <- lane 1
|
||||
// ^ ^
|
||||
// | coefficients for second iteration
|
||||
// coefficients for first iteration
|
||||
//
|
||||
// So in this case we would have two iterations. In the first
|
||||
// both lanes are multiplied by r². In the second only the
|
||||
// first lane is multiplied by r² and the second lane is
|
||||
// instead multiplied by r. This gives use the odd and even
|
||||
// powers of r that we need from the original equation.
|
||||
//
|
||||
// Notation:
|
||||
//
|
||||
// h - accumulator
|
||||
// r - key
|
||||
// m - message
|
||||
//
|
||||
// [a, b] - SIMD register holding two 64-bit values
|
||||
// [a, b, c, d] - SIMD register holding four 32-bit values
|
||||
// xᵢ[n] - limb n of variable x with bit width i
|
||||
//
|
||||
// Limbs are expressed in little endian order, so for 26-bit
|
||||
// limbs x₂₆[4] will be the most significant limb and x₂₆[0]
|
||||
// will be the least significant limb.
|
||||
|
||||
// constants
|
||||
#define MOD26 V0
|
||||
#define EX0 V1
|
||||
#define EX1 V2
|
||||
#define EX2 V3
|
||||
// masking constants
|
||||
#define MOD24 V0 // [0x0000000000ffffff, 0x0000000000ffffff] - mask low 24-bits
|
||||
#define MOD26 V1 // [0x0000000003ffffff, 0x0000000003ffffff] - mask low 26-bits
|
||||
|
||||
// temporaries
|
||||
#define T_0 V4
|
||||
#define T_1 V5
|
||||
#define T_2 V6
|
||||
#define T_3 V7
|
||||
#define T_4 V8
|
||||
// expansion constants (see EXPAND macro)
|
||||
#define EX0 V2
|
||||
#define EX1 V3
|
||||
#define EX2 V4
|
||||
|
||||
// key (r)
|
||||
#define R_0 V9
|
||||
#define R_1 V10
|
||||
#define R_2 V11
|
||||
#define R_3 V12
|
||||
#define R_4 V13
|
||||
#define R5_1 V14
|
||||
#define R5_2 V15
|
||||
#define R5_3 V16
|
||||
#define R5_4 V17
|
||||
#define RSAVE_0 R5
|
||||
#define RSAVE_1 R6
|
||||
#define RSAVE_2 R7
|
||||
#define RSAVE_3 R8
|
||||
#define RSAVE_4 R9
|
||||
#define R5SAVE_1 V28
|
||||
#define R5SAVE_2 V29
|
||||
#define R5SAVE_3 V30
|
||||
#define R5SAVE_4 V31
|
||||
// key (r², r or 1 depending on context)
|
||||
#define R_0 V5
|
||||
#define R_1 V6
|
||||
#define R_2 V7
|
||||
#define R_3 V8
|
||||
#define R_4 V9
|
||||
|
||||
// message block
|
||||
#define F_0 V18
|
||||
#define F_1 V19
|
||||
#define F_2 V20
|
||||
#define F_3 V21
|
||||
#define F_4 V22
|
||||
// precalculated coefficients (5r², 5r or 0 depending on context)
|
||||
#define R5_1 V10
|
||||
#define R5_2 V11
|
||||
#define R5_3 V12
|
||||
#define R5_4 V13
|
||||
|
||||
// accumulator
|
||||
#define H_0 V23
|
||||
#define H_1 V24
|
||||
#define H_2 V25
|
||||
#define H_3 V26
|
||||
#define H_4 V27
|
||||
// message block (m)
|
||||
#define M_0 V14
|
||||
#define M_1 V15
|
||||
#define M_2 V16
|
||||
#define M_3 V17
|
||||
#define M_4 V18
|
||||
|
||||
GLOBL ·keyMask<>(SB), RODATA, $16
|
||||
DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
|
||||
DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
|
||||
// accumulator (h)
|
||||
#define H_0 V19
|
||||
#define H_1 V20
|
||||
#define H_2 V21
|
||||
#define H_3 V22
|
||||
#define H_4 V23
|
||||
|
||||
GLOBL ·bswapMask<>(SB), RODATA, $16
|
||||
DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
|
||||
DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
|
||||
// temporary registers (for short-lived values)
|
||||
#define T_0 V24
|
||||
#define T_1 V25
|
||||
#define T_2 V26
|
||||
#define T_3 V27
|
||||
#define T_4 V28
|
||||
|
||||
GLOBL ·constants<>(SB), RODATA, $64
|
||||
// MOD26
|
||||
DATA ·constants<>+0(SB)/8, $0x3ffffff
|
||||
DATA ·constants<>+8(SB)/8, $0x3ffffff
|
||||
GLOBL ·constants<>(SB), RODATA, $0x30
|
||||
// EX0
|
||||
DATA ·constants<>+16(SB)/8, $0x0006050403020100
|
||||
DATA ·constants<>+24(SB)/8, $0x1016151413121110
|
||||
DATA ·constants<>+0x00(SB)/8, $0x0006050403020100
|
||||
DATA ·constants<>+0x08(SB)/8, $0x1016151413121110
|
||||
// EX1
|
||||
DATA ·constants<>+32(SB)/8, $0x060c0b0a09080706
|
||||
DATA ·constants<>+40(SB)/8, $0x161c1b1a19181716
|
||||
DATA ·constants<>+0x10(SB)/8, $0x060c0b0a09080706
|
||||
DATA ·constants<>+0x18(SB)/8, $0x161c1b1a19181716
|
||||
// EX2
|
||||
DATA ·constants<>+48(SB)/8, $0x0d0d0d0d0d0f0e0d
|
||||
DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d
|
||||
DATA ·constants<>+0x20(SB)/8, $0x0d0d0d0d0d0f0e0d
|
||||
DATA ·constants<>+0x28(SB)/8, $0x1d1d1d1d1d1f1e1d
|
||||
|
||||
// h = (f*g) % (2**130-5) [partial reduction]
|
||||
// MULTIPLY multiplies each lane of f and g, partially reduced
|
||||
// modulo 2¹³⁰ - 5. The result, h, consists of partial products
|
||||
// in each lane that need to be reduced further to produce the
|
||||
// final result.
|
||||
//
|
||||
// h₁₃₀ = (f₁₃₀g₁₃₀) % 2¹³⁰ + (5f₁₃₀g₁₃₀) / 2¹³⁰
|
||||
//
|
||||
// Note that the multiplication by 5 of the high bits is
|
||||
// achieved by precalculating the multiplication of four of the
|
||||
// g coefficients by 5. These are g51-g54.
|
||||
#define MULTIPLY(f0, f1, f2, f3, f4, g0, g1, g2, g3, g4, g51, g52, g53, g54, h0, h1, h2, h3, h4) \
|
||||
VMLOF f0, g0, h0 \
|
||||
VMLOF f0, g1, h1 \
|
||||
VMLOF f0, g2, h2 \
|
||||
VMLOF f0, g3, h3 \
|
||||
VMLOF f0, g1, h1 \
|
||||
VMLOF f0, g4, h4 \
|
||||
VMLOF f0, g2, h2 \
|
||||
VMLOF f1, g54, T_0 \
|
||||
VMLOF f1, g0, T_1 \
|
||||
VMLOF f1, g1, T_2 \
|
||||
VMLOF f1, g2, T_3 \
|
||||
VMLOF f1, g0, T_1 \
|
||||
VMLOF f1, g3, T_4 \
|
||||
VMLOF f1, g1, T_2 \
|
||||
VMALOF f2, g53, h0, h0 \
|
||||
VMALOF f2, g54, h1, h1 \
|
||||
VMALOF f2, g0, h2, h2 \
|
||||
VMALOF f2, g1, h3, h3 \
|
||||
VMALOF f2, g54, h1, h1 \
|
||||
VMALOF f2, g2, h4, h4 \
|
||||
VMALOF f2, g0, h2, h2 \
|
||||
VMALOF f3, g52, T_0, T_0 \
|
||||
VMALOF f3, g53, T_1, T_1 \
|
||||
VMALOF f3, g54, T_2, T_2 \
|
||||
VMALOF f3, g0, T_3, T_3 \
|
||||
VMALOF f3, g53, T_1, T_1 \
|
||||
VMALOF f3, g1, T_4, T_4 \
|
||||
VMALOF f3, g54, T_2, T_2 \
|
||||
VMALOF f4, g51, h0, h0 \
|
||||
VMALOF f4, g52, h1, h1 \
|
||||
VMALOF f4, g53, h2, h2 \
|
||||
VMALOF f4, g54, h3, h3 \
|
||||
VMALOF f4, g52, h1, h1 \
|
||||
VMALOF f4, g0, h4, h4 \
|
||||
VMALOF f4, g53, h2, h2 \
|
||||
VAG T_0, h0, h0 \
|
||||
VAG T_1, h1, h1 \
|
||||
VAG T_2, h2, h2 \
|
||||
VAG T_3, h3, h3 \
|
||||
VAG T_4, h4, h4
|
||||
VAG T_1, h1, h1 \
|
||||
VAG T_4, h4, h4 \
|
||||
VAG T_2, h2, h2
|
||||
|
||||
// carry h0->h1 h3->h4, h1->h2 h4->h0, h0->h1 h2->h3, h3->h4
|
||||
// REDUCE performs the following carry operations in four
|
||||
// stages, as specified in Bernstein & Schwabe:
|
||||
//
|
||||
// 1: h₂₆[0]->h₂₆[1] h₂₆[3]->h₂₆[4]
|
||||
// 2: h₂₆[1]->h₂₆[2] h₂₆[4]->h₂₆[0]
|
||||
// 3: h₂₆[0]->h₂₆[1] h₂₆[2]->h₂₆[3]
|
||||
// 4: h₂₆[3]->h₂₆[4]
|
||||
//
|
||||
// The result is that all of the limbs are limited to 26-bits
|
||||
// except for h₂₆[1] and h₂₆[4] which are limited to 27-bits.
|
||||
//
|
||||
// Note that although each limb is aligned at 26-bit intervals
|
||||
// they may contain values that exceed 2²⁶ - 1, hence the need
|
||||
// to carry the excess bits in each limb.
|
||||
#define REDUCE(h0, h1, h2, h3, h4) \
|
||||
VESRLG $26, h0, T_0 \
|
||||
VESRLG $26, h3, T_1 \
|
||||
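The file comment earlier in this hunk describes evaluating the Poly1305 sum in two SIMD lanes, with both lanes multiplied by r² on every iteration and only the final odd-lane block multiplied by r. The following math/big sketch (not part of the commit, arbitrary toy values) checks that this two-lane refactoring matches the sequential Horner evaluation h = m0·r⁴ + m1·r³ + m2·r² + m3·r modulo 2¹³⁰ - 5.

```go
// Sketch: verify lane0 + lane1 == sequential Poly1305-style evaluation for a
// 4-block message, where lane 0 holds the even-indexed blocks and lane 1 the
// odd-indexed blocks.
package main

import (
	"fmt"
	"math/big"
)

func main() {
	p := new(big.Int).Sub(new(big.Int).Lsh(big.NewInt(1), 130), big.NewInt(5))
	r := big.NewInt(123456789)
	m := []*big.Int{big.NewInt(11), big.NewInt(22), big.NewInt(33), big.NewInt(44)}

	mod := func(x *big.Int) *big.Int { return x.Mod(x, p) }
	mul := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Mul(a, b)) }
	add := func(a, b *big.Int) *big.Int { return mod(new(big.Int).Add(a, b)) }

	r2 := mul(r, r)

	// Sequential evaluation: h = m0·r⁴ + m1·r³ + m2·r² + m3·r (Horner form).
	seq := big.NewInt(0)
	for _, mi := range m {
		seq = mul(add(seq, mi), r)
	}

	// Two-lane evaluation from the comment above:
	//   lane 0: (m0·r² + m2)·r²   (even-indexed blocks)
	//   lane 1: (m1·r² + m3)·r    (odd-indexed blocks)
	lane0 := mul(add(mul(m[0], r2), m[2]), r2)
	lane1 := mul(add(mul(m[1], r2), m[3]), r)
	fmt.Println(seq.Cmp(add(lane0, lane1)) == 0) // true
}
```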
@@ -136,144 +208,155 @@ DATA ·constants<>+56(SB)/8, $0x1d1d1d1d1d1f1e1d
|
||||
VN MOD26, h3, h3 \
|
||||
VAG T_2, h4, h4
|
||||
|
||||
// expand in0 into d[0] and in1 into d[1]
|
||||
// EXPAND splits the 128-bit little-endian values in0 and in1
|
||||
// into 26-bit big-endian limbs and places the results into
|
||||
// the first and second lane of d₂₆[0:4] respectively.
|
||||
//
|
||||
// The EX0, EX1 and EX2 constants are arrays of byte indices
|
||||
// for permutation. The permutation both reverses the bytes
|
||||
// in the input and ensures the bytes are copied into the
|
||||
// destination limb ready to be shifted into their final
|
||||
// position.
|
||||
#define EXPAND(in0, in1, d0, d1, d2, d3, d4) \
|
||||
VGBM $0x0707, d1 \ // d1=tmp
|
||||
VPERM in0, in1, EX2, d4 \
|
||||
VPERM in0, in1, EX0, d0 \
|
||||
VPERM in0, in1, EX1, d2 \
|
||||
VN d1, d4, d4 \
|
||||
VPERM in0, in1, EX2, d4 \
|
||||
VESRLG $26, d0, d1 \
|
||||
VESRLG $30, d2, d3 \
|
||||
VESRLG $4, d2, d2 \
|
||||
VN MOD26, d0, d0 \
|
||||
VN MOD26, d1, d1 \
|
||||
VN MOD26, d2, d2 \
|
||||
VN MOD26, d3, d3
|
||||
VN MOD26, d0, d0 \ // [in0₂₆[0], in1₂₆[0]]
|
||||
VN MOD26, d3, d3 \ // [in0₂₆[3], in1₂₆[3]]
|
||||
VN MOD26, d1, d1 \ // [in0₂₆[1], in1₂₆[1]]
|
||||
VN MOD24, d4, d4 \ // [in0₂₆[4], in1₂₆[4]]
|
||||
VN MOD26, d2, d2 // [in0₂₆[2], in1₂₆[2]]
|
||||
|
||||
// pack h4:h0 into h1:h0 (no carry)
|
||||
#define PACK(h0, h1, h2, h3, h4) \
|
||||
VESLG $26, h1, h1 \
|
||||
VESLG $26, h3, h3 \
|
||||
VO h0, h1, h0 \
|
||||
VO h2, h3, h2 \
|
||||
VESLG $4, h2, h2 \
|
||||
VLEIB $7, $48, h1 \
|
||||
VSLB h1, h2, h2 \
|
||||
VO h0, h2, h0 \
|
||||
VLEIB $7, $104, h1 \
|
||||
VSLB h1, h4, h3 \
|
||||
VO h3, h0, h0 \
|
||||
VLEIB $7, $24, h1 \
|
||||
VSRLB h1, h4, h1
|
||||
// func updateVX(state *macState, msg []byte)
|
||||
TEXT ·updateVX(SB), NOSPLIT, $0
|
||||
MOVD state+0(FP), R1
|
||||
LMG msg+8(FP), R2, R3 // R2=msg_base, R3=msg_len
|
||||
|
||||
// if h > 2**130-5 then h -= 2**130-5
|
||||
#define MOD(h0, h1, t0, t1, t2) \
|
||||
VZERO t0 \
|
||||
VLEIG $1, $5, t0 \
|
||||
VACCQ h0, t0, t1 \
|
||||
VAQ h0, t0, t0 \
|
||||
VONE t2 \
|
||||
VLEIG $1, $-4, t2 \
|
||||
VAQ t2, t1, t1 \
|
||||
VACCQ h1, t1, t1 \
|
||||
VONE t2 \
|
||||
VAQ t2, t1, t1 \
|
||||
VN h0, t1, t2 \
|
||||
VNC t0, t1, t1 \
|
||||
VO t1, t2, h0
|
||||
|
||||
// func poly1305vx(out *[16]byte, m *byte, mlen uint64, key *[32]key)
|
||||
TEXT ·poly1305vx(SB), $0-32
|
||||
// This code processes up to 2 blocks (32 bytes) per iteration
|
||||
// using the algorithm described in:
|
||||
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
|
||||
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
|
||||
LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
|
||||
|
||||
// load MOD26, EX0, EX1 and EX2
|
||||
// load EX0, EX1 and EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), MOD26, EX2
|
||||
VLM (R5), EX0, EX2
|
||||
|
||||
// setup r
|
||||
VL (R4), T_0
|
||||
MOVD $·keyMask<>(SB), R6
|
||||
VL (R6), T_1
|
||||
VN T_0, T_1, T_0
|
||||
EXPAND(T_0, T_0, R_0, R_1, R_2, R_3, R_4)
|
||||
// generate masks
|
||||
VGMG $(64-24), $63, MOD24 // [0x00ffffff, 0x00ffffff]
|
||||
VGMG $(64-26), $63, MOD26 // [0x03ffffff, 0x03ffffff]
|
||||
|
||||
// setup r*5
|
||||
VLEIG $0, $5, T_0
|
||||
VLEIG $1, $5, T_0
|
||||
// load h (accumulator) and r (key) from state
|
||||
VZERO T_1 // [0, 0]
|
||||
VL 0(R1), T_0 // [h₆₄[0], h₆₄[1]]
|
||||
VLEG $0, 16(R1), T_1 // [h₆₄[2], 0]
|
||||
VL 24(R1), T_2 // [r₆₄[0], r₆₄[1]]
|
||||
VPDI $0, T_0, T_2, T_3 // [h₆₄[0], r₆₄[0]]
|
||||
VPDI $5, T_0, T_2, T_4 // [h₆₄[1], r₆₄[1]]
|
||||
|
||||
// store r (for final block)
|
||||
VMLOF T_0, R_1, R5SAVE_1
|
||||
VMLOF T_0, R_2, R5SAVE_2
|
||||
VMLOF T_0, R_3, R5SAVE_3
|
||||
VMLOF T_0, R_4, R5SAVE_4
|
||||
VLGVG $0, R_0, RSAVE_0
|
||||
VLGVG $0, R_1, RSAVE_1
|
||||
VLGVG $0, R_2, RSAVE_2
|
||||
VLGVG $0, R_3, RSAVE_3
|
||||
VLGVG $0, R_4, RSAVE_4
|
||||
// unpack h and r into 26-bit limbs
|
||||
// note: h₆₄[2] may have the low 3 bits set, so h₂₆[4] is a 27-bit value
|
||||
VN MOD26, T_3, H_0 // [h₂₆[0], r₂₆[0]]
|
||||
VZERO H_1 // [0, 0]
|
||||
VZERO H_3 // [0, 0]
|
||||
VGMG $(64-12-14), $(63-12), T_0 // [0x03fff000, 0x03fff000] - 26-bit mask with low 12 bits masked out
|
||||
VESLG $24, T_1, T_1 // [h₆₄[2]<<24, 0]
|
||||
VERIMG $-26&63, T_3, MOD26, H_1 // [h₂₆[1], r₂₆[1]]
|
||||
VESRLG $+52&63, T_3, H_2 // [h₂₆[2], r₂₆[2]] - low 12 bits only
|
||||
VERIMG $-14&63, T_4, MOD26, H_3 // [h₂₆[1], r₂₆[1]]
|
||||
VESRLG $40, T_4, H_4 // [h₂₆[4], r₂₆[4]] - low 24 bits only
|
||||
VERIMG $+12&63, T_4, T_0, H_2 // [h₂₆[2], r₂₆[2]] - complete
|
||||
VO T_1, H_4, H_4 // [h₂₆[4], r₂₆[4]] - complete
|
||||
|
||||
// skip r**2 calculation
|
||||
// replicate r across all 4 vector elements
|
||||
VREPF $3, H_0, R_0 // [r₂₆[0], r₂₆[0], r₂₆[0], r₂₆[0]]
|
||||
VREPF $3, H_1, R_1 // [r₂₆[1], r₂₆[1], r₂₆[1], r₂₆[1]]
|
||||
VREPF $3, H_2, R_2 // [r₂₆[2], r₂₆[2], r₂₆[2], r₂₆[2]]
|
||||
VREPF $3, H_3, R_3 // [r₂₆[3], r₂₆[3], r₂₆[3], r₂₆[3]]
|
||||
VREPF $3, H_4, R_4 // [r₂₆[4], r₂₆[4], r₂₆[4], r₂₆[4]]
|
||||
|
||||
// zero out lane 1 of h
|
||||
VLEIG $1, $0, H_0 // [h₂₆[0], 0]
|
||||
VLEIG $1, $0, H_1 // [h₂₆[1], 0]
|
||||
VLEIG $1, $0, H_2 // [h₂₆[2], 0]
|
||||
VLEIG $1, $0, H_3 // [h₂₆[3], 0]
|
||||
VLEIG $1, $0, H_4 // [h₂₆[4], 0]
|
||||
|
||||
// calculate 5r (ignore least significant limb)
|
||||
VREPIF $5, T_0
|
||||
VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r₂₆[1], 5r₂₆[1], 5r₂₆[1]]
|
||||
VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r₂₆[2], 5r₂₆[2], 5r₂₆[2]]
|
||||
VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r₂₆[3], 5r₂₆[3], 5r₂₆[3]]
|
||||
VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r₂₆[4], 5r₂₆[4], 5r₂₆[4]]
|
||||
|
||||
// skip r² calculation if we are only calculating one block
|
||||
CMPBLE R3, $16, skip
|
||||
|
||||
// calculate r**2
|
||||
MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5SAVE_1, R5SAVE_2, R5SAVE_3, R5SAVE_4, H_0, H_1, H_2, H_3, H_4)
|
||||
REDUCE(H_0, H_1, H_2, H_3, H_4)
|
||||
VLEIG $0, $5, T_0
|
||||
VLEIG $1, $5, T_0
|
||||
VMLOF T_0, H_1, R5_1
|
||||
VMLOF T_0, H_2, R5_2
|
||||
VMLOF T_0, H_3, R5_3
|
||||
VMLOF T_0, H_4, R5_4
|
||||
VLR H_0, R_0
|
||||
VLR H_1, R_1
|
||||
VLR H_2, R_2
|
||||
VLR H_3, R_3
|
||||
VLR H_4, R_4
|
||||
// calculate r²
|
||||
MULTIPLY(R_0, R_1, R_2, R_3, R_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, M_0, M_1, M_2, M_3, M_4)
|
||||
REDUCE(M_0, M_1, M_2, M_3, M_4)
|
||||
VGBM $0x0f0f, T_0
|
||||
VERIMG $0, M_0, T_0, R_0 // [r₂₆[0], r²₂₆[0], r₂₆[0], r²₂₆[0]]
|
||||
VERIMG $0, M_1, T_0, R_1 // [r₂₆[1], r²₂₆[1], r₂₆[1], r²₂₆[1]]
|
||||
VERIMG $0, M_2, T_0, R_2 // [r₂₆[2], r²₂₆[2], r₂₆[2], r²₂₆[2]]
|
||||
VERIMG $0, M_3, T_0, R_3 // [r₂₆[3], r²₂₆[3], r₂₆[3], r²₂₆[3]]
|
||||
VERIMG $0, M_4, T_0, R_4 // [r₂₆[4], r²₂₆[4], r₂₆[4], r²₂₆[4]]
|
||||
|
||||
// initialize h
|
||||
VZERO H_0
|
||||
VZERO H_1
|
||||
VZERO H_2
|
||||
VZERO H_3
|
||||
VZERO H_4
|
||||
// calculate 5r² (ignore least significant limb)
|
||||
VREPIF $5, T_0
|
||||
VMLF T_0, R_1, R5_1 // [5r₂₆[1], 5r²₂₆[1], 5r₂₆[1], 5r²₂₆[1]]
|
||||
VMLF T_0, R_2, R5_2 // [5r₂₆[2], 5r²₂₆[2], 5r₂₆[2], 5r²₂₆[2]]
|
||||
VMLF T_0, R_3, R5_3 // [5r₂₆[3], 5r²₂₆[3], 5r₂₆[3], 5r²₂₆[3]]
|
||||
VMLF T_0, R_4, R5_4 // [5r₂₆[4], 5r²₂₆[4], 5r₂₆[4], 5r²₂₆[4]]
|
||||
|
||||
loop:
|
||||
CMPBLE R3, $32, b2
|
||||
VLM (R2), T_0, T_1
|
||||
SUB $32, R3
|
||||
MOVD $32(R2), R2
|
||||
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
|
||||
VLEIB $4, $1, F_4
|
||||
VLEIB $12, $1, F_4
|
||||
CMPBLE R3, $32, b2 // 2 or fewer blocks remaining, need to change key coefficients
|
||||
|
||||
// load next 2 blocks from message
|
||||
VLM (R2), T_0, T_1
|
||||
|
||||
// update message slice
|
||||
SUB $32, R3
|
||||
MOVD $32(R2), R2
|
||||
|
||||
// unpack message blocks into 26-bit big-endian limbs
|
||||
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
|
||||
|
||||
// add 2¹²⁸ to each message block value
|
||||
VLEIB $4, $1, M_4
|
||||
VLEIB $12, $1, M_4
|
||||
|
||||
multiply:
|
||||
VAG H_0, F_0, F_0
|
||||
VAG H_1, F_1, F_1
|
||||
VAG H_2, F_2, F_2
|
||||
VAG H_3, F_3, F_3
|
||||
VAG H_4, F_4, F_4
|
||||
MULTIPLY(F_0, F_1, F_2, F_3, F_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
|
||||
// accumulate the incoming message
|
||||
VAG H_0, M_0, M_0
|
||||
VAG H_3, M_3, M_3
|
||||
VAG H_1, M_1, M_1
|
||||
VAG H_4, M_4, M_4
|
||||
VAG H_2, M_2, M_2
|
||||
|
||||
// multiply the accumulator by the key coefficient
|
||||
MULTIPLY(M_0, M_1, M_2, M_3, M_4, R_0, R_1, R_2, R_3, R_4, R5_1, R5_2, R5_3, R5_4, H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
// carry and partially reduce the partial products
|
||||
REDUCE(H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
CMPBNE R3, $0, loop
|
||||
|
||||
finish:
|
||||
// sum vectors
|
||||
// sum lane 0 and lane 1 and put the result in lane 1
|
||||
VZERO T_0
|
||||
VSUMQG H_0, T_0, H_0
|
||||
VSUMQG H_1, T_0, H_1
|
||||
VSUMQG H_2, T_0, H_2
|
||||
VSUMQG H_3, T_0, H_3
|
||||
VSUMQG H_1, T_0, H_1
|
||||
VSUMQG H_4, T_0, H_4
|
||||
VSUMQG H_2, T_0, H_2
|
||||
|
||||
// h may be >= 2*(2**130-5) so we need to reduce it again
|
||||
// reduce again after summation
|
||||
// TODO(mundaym): there might be a more efficient way to do this
|
||||
// now that we only have 1 active lane. For example, we could
|
||||
// simultaneously pack the values as we reduce them.
|
||||
REDUCE(H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
// carry h1->h4
|
||||
// carry h[1] through to h[4] so that only h[4] can exceed 2²⁶ - 1
|
||||
// TODO(mundaym): in testing this final carry was unnecessary.
|
||||
// Needs a proof before it can be removed though.
|
||||
VESRLG $26, H_1, T_1
|
||||
VN MOD26, H_1, H_1
|
||||
VAQ T_1, H_2, H_2
|
||||
@@ -284,95 +367,137 @@ finish:
|
||||
VN MOD26, H_3, H_3
|
||||
VAQ T_3, H_4, H_4
|
||||
|
||||
// h is now < 2*(2**130-5)
|
||||
// pack h into h1 (hi) and h0 (lo)
|
||||
PACK(H_0, H_1, H_2, H_3, H_4)
|
||||
|
||||
// if h > 2**130-5 then h -= 2**130-5
|
||||
MOD(H_0, H_1, T_0, T_1, T_2)
|
||||
|
||||
// h += s
|
||||
MOVD $·bswapMask<>(SB), R5
|
||||
VL (R5), T_1
|
||||
VL 16(R4), T_0
|
||||
VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
|
||||
VAQ T_0, H_0, H_0
|
||||
VPERM H_0, H_0, T_1, H_0 // reverse bytes (to little)
|
||||
VST H_0, (R1)
|
||||
// h is now < 2(2¹³⁰ - 5)
|
||||
// Pack each lane in h₂₆[0:4] into h₁₂₈[0:1].
|
||||
VESLG $26, H_1, H_1
|
||||
VESLG $26, H_3, H_3
|
||||
VO H_0, H_1, H_0
|
||||
VO H_2, H_3, H_2
|
||||
VESLG $4, H_2, H_2
|
||||
VLEIB $7, $48, H_1
|
||||
VSLB H_1, H_2, H_2
|
||||
VO H_0, H_2, H_0
|
||||
VLEIB $7, $104, H_1
|
||||
VSLB H_1, H_4, H_3
|
||||
VO H_3, H_0, H_0
|
||||
VLEIB $7, $24, H_1
|
||||
VSRLB H_1, H_4, H_1
|
||||
|
||||
// update state
|
||||
VSTEG $1, H_0, 0(R1)
|
||||
VSTEG $0, H_0, 8(R1)
|
||||
VSTEG $1, H_1, 16(R1)
|
||||
RET
|
||||
|
||||
b2:
|
||||
b2: // 2 or fewer blocks remaining
|
||||
CMPBLE R3, $16, b1
|
||||
|
||||
// 2 blocks remaining
|
||||
SUB $17, R3
|
||||
VL (R2), T_0
|
||||
VLL R3, 16(R2), T_1
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, T_1
|
||||
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $12, $1, F_4
|
||||
VLEIB $4, $1, F_4
|
||||
// Load the 2 remaining blocks (17-32 bytes remaining).
|
||||
MOVD $-17(R3), R0 // index of final byte to load modulo 16
|
||||
VL (R2), T_0 // load full 16 byte block
|
||||
VLL R0, 16(R2), T_1 // load final (possibly partial) block and pad with zeros to 16 bytes
|
||||
|
||||
// setup [r²,r]
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, RSAVE_3, R_3
|
||||
VLVGG $1, RSAVE_4, R_4
|
||||
VPDI $0, R5_1, R5SAVE_1, R5_1
|
||||
VPDI $0, R5_2, R5SAVE_2, R5_2
|
||||
VPDI $0, R5_3, R5SAVE_3, R5_3
|
||||
VPDI $0, R5_4, R5SAVE_4, R5_4
|
||||
// The Poly1305 algorithm requires that a 1 bit be appended to
|
||||
// each message block. If the final block is less than 16 bytes
|
||||
// long then it is easiest to insert the 1 before the message
|
||||
// block is split into 26-bit limbs. If, on the other hand, the
|
||||
// final message block is 16 bytes long then we append the 1 bit
|
||||
// after expansion as normal.
|
||||
MOVBZ $1, R0
|
||||
MOVD $-16(R3), R3 // index of byte in last block to insert 1 at (could be 16)
|
||||
CMPBEQ R3, $16, 2(PC) // skip the insertion if the final block is 16 bytes long
|
||||
VLVGB R3, R0, T_1 // insert 1 into the byte at index R3
|
||||
|
||||
// Split both blocks into 26-bit limbs in the appropriate lanes.
|
||||
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
|
||||
|
||||
// Append a 1 byte to the end of the second to last block.
|
||||
VLEIB $4, $1, M_4
|
||||
|
||||
// Append a 1 byte to the end of the last block only if it is a
|
||||
// full 16 byte block.
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $12, $1, M_4
|
||||
|
||||
// Finally, set up the coefficients for the final multiplication.
|
||||
// We have previously saved r and 5r in the 32-bit even indexes
|
||||
// of the R_[0-4] and R5_[1-4] coefficient registers.
|
||||
//
|
||||
// We want lane 0 to be multiplied by r² so that can be kept the
|
||||
// same. We want lane 1 to be multiplied by r so we need to move
|
||||
// the saved r value into the 32-bit odd index in lane 1 by
|
||||
// rotating the 64-bit lane by 32.
|
||||
VGBM $0x00ff, T_0 // [0, 0xffffffffffffffff] - mask lane 1 only
|
||||
VERIMG $32, R_0, T_0, R_0 // [_, r²₂₆[0], _, r₂₆[0]]
|
||||
VERIMG $32, R_1, T_0, R_1 // [_, r²₂₆[1], _, r₂₆[1]]
|
||||
VERIMG $32, R_2, T_0, R_2 // [_, r²₂₆[2], _, r₂₆[2]]
|
||||
VERIMG $32, R_3, T_0, R_3 // [_, r²₂₆[3], _, r₂₆[3]]
|
||||
VERIMG $32, R_4, T_0, R_4 // [_, r²₂₆[4], _, r₂₆[4]]
|
||||
VERIMG $32, R5_1, T_0, R5_1 // [_, 5r²₂₆[1], _, 5r₂₆[1]]
|
||||
VERIMG $32, R5_2, T_0, R5_2 // [_, 5r²₂₆[2], _, 5r₂₆[2]]
|
||||
VERIMG $32, R5_3, T_0, R5_3 // [_, 5r²₂₆[3], _, 5r₂₆[3]]
|
||||
VERIMG $32, R5_4, T_0, R5_4 // [_, 5r²₂₆[4], _, 5r₂₆[4]]
|
||||
|
||||
MOVD $0, R3
|
||||
BR multiply
|
||||
|
||||
skip:
|
||||
VZERO H_0
|
||||
VZERO H_1
|
||||
VZERO H_2
|
||||
VZERO H_3
|
||||
VZERO H_4
|
||||
|
||||
CMPBEQ R3, $0, finish
|
||||
|
||||
b1:
|
||||
// 1 block remaining
|
||||
SUB $1, R3
|
||||
VLL R3, (R2), T_0
|
||||
ADD $1, R3
|
||||
b1: // 1 block remaining
|
||||
|
||||
// Load the final block (1-16 bytes). This will be placed into
|
||||
// lane 0.
|
||||
MOVD $-1(R3), R0
|
||||
VLL R0, (R2), T_0 // pad to 16 bytes with zeros
|
||||
|
||||
// The Poly1305 algorithm requires that a 1 bit be appended to
|
||||
// each message block. If the final block is less than 16 bytes
|
||||
// long then it is easiest to insert the 1 before the message
|
||||
// block is split into 26-bit limbs. If, on the other hand, the
|
||||
// final message block is 16 bytes long then we append the 1 bit
|
||||
// after expansion as normal.
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, T_0
|
||||
VZERO T_1
|
||||
EXPAND(T_0, T_1, F_0, F_1, F_2, F_3, F_4)
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $4, $1, F_4
|
||||
VLEIG $1, $1, R_0
|
||||
VZERO R_1
|
||||
VZERO R_2
|
||||
VZERO R_3
|
||||
VZERO R_4
|
||||
VZERO R5_1
|
||||
VZERO R5_2
|
||||
VZERO R5_3
|
||||
VZERO R5_4
|
||||
|
||||
// setup [r, 1]
|
||||
VLVGG $0, RSAVE_0, R_0
|
||||
VLVGG $0, RSAVE_1, R_1
|
||||
VLVGG $0, RSAVE_2, R_2
|
||||
VLVGG $0, RSAVE_3, R_3
|
||||
VLVGG $0, RSAVE_4, R_4
|
||||
VPDI $0, R5SAVE_1, R5_1, R5_1
|
||||
VPDI $0, R5SAVE_2, R5_2, R5_2
|
||||
VPDI $0, R5SAVE_3, R5_3, R5_3
|
||||
VPDI $0, R5SAVE_4, R5_4, R5_4
|
||||
// Set the message block in lane 1 to the value 0 so that it
|
||||
// can be accumulated without affecting the final result.
|
||||
VZERO T_1
|
||||
|
||||
// Split the final message block into 26-bit limbs in lane 0.
|
||||
// Lane 1 will be contain 0.
|
||||
EXPAND(T_0, T_1, M_0, M_1, M_2, M_3, M_4)
|
||||
|
||||
// Append a 1 byte to the end of the last block only if it is a
|
||||
// full 16 byte block.
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $4, $1, M_4
|
||||
|
||||
// We have previously saved r and 5r in the 32-bit even indexes
|
||||
// of the R_[0-4] and R5_[1-4] coefficient registers.
|
||||
//
|
||||
// We want lane 0 to be multiplied by r so we need to move the
|
||||
// saved r value into the 32-bit odd index in lane 0. We want
|
||||
// lane 1 to be set to the value 1. This makes multiplication
|
||||
// a no-op. We do this by setting lane 1 in every register to 0
|
||||
// and then just setting the 32-bit index 3 in R_0 to 1.
|
||||
VZERO T_0
|
||||
MOVD $0, R0
|
||||
MOVD $0x10111213, R12
|
||||
VLVGP R12, R0, T_1 // [_, 0x10111213, _, 0x00000000]
|
||||
VPERM T_0, R_0, T_1, R_0 // [_, r₂₆[0], _, 0]
|
||||
VPERM T_0, R_1, T_1, R_1 // [_, r₂₆[1], _, 0]
|
||||
VPERM T_0, R_2, T_1, R_2 // [_, r₂₆[2], _, 0]
|
||||
VPERM T_0, R_3, T_1, R_3 // [_, r₂₆[3], _, 0]
|
||||
VPERM T_0, R_4, T_1, R_4 // [_, r₂₆[4], _, 0]
|
||||
VPERM T_0, R5_1, T_1, R5_1 // [_, 5r₂₆[1], _, 0]
|
||||
VPERM T_0, R5_2, T_1, R5_2 // [_, 5r₂₆[2], _, 0]
|
||||
VPERM T_0, R5_3, T_1, R5_3 // [_, 5r₂₆[3], _, 0]
|
||||
VPERM T_0, R5_4, T_1, R5_4 // [_, 5r₂₆[4], _, 0]
|
||||
|
||||
// Set the value of lane 1 to be 1.
|
||||
VLEIF $3, $1, R_0 // [_, r₂₆[0], _, 1]
|
||||
|
||||
MOVD $0, R3
|
||||
BR multiply
|
||||
|
909
vendor/golang.org/x/crypto/poly1305/sum_vmsl_s390x.s
generated
vendored
@@ -1,909 +0,0 @@
|
||||
// Copyright 2018 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build s390x,go1.11,!gccgo,!appengine
|
||||
|
||||
#include "textflag.h"
|
||||
|
||||
// Implementation of Poly1305 using the vector facility (vx) and the VMSL instruction.
|
||||
|
||||
// constants
|
||||
#define EX0 V1
|
||||
#define EX1 V2
|
||||
#define EX2 V3
|
||||
|
||||
// temporaries
|
||||
#define T_0 V4
|
||||
#define T_1 V5
|
||||
#define T_2 V6
|
||||
#define T_3 V7
|
||||
#define T_4 V8
|
||||
#define T_5 V9
|
||||
#define T_6 V10
|
||||
#define T_7 V11
|
||||
#define T_8 V12
|
||||
#define T_9 V13
|
||||
#define T_10 V14
|
||||
|
||||
// r**2 & r**4
|
||||
#define R_0 V15
|
||||
#define R_1 V16
|
||||
#define R_2 V17
|
||||
#define R5_1 V18
|
||||
#define R5_2 V19
|
||||
// key (r)
|
||||
#define RSAVE_0 R7
|
||||
#define RSAVE_1 R8
|
||||
#define RSAVE_2 R9
|
||||
#define R5SAVE_1 R10
|
||||
#define R5SAVE_2 R11
|
||||
|
||||
// message block
|
||||
#define M0 V20
|
||||
#define M1 V21
|
||||
#define M2 V22
|
||||
#define M3 V23
|
||||
#define M4 V24
|
||||
#define M5 V25
|
||||
|
||||
// accumulator
|
||||
#define H0_0 V26
|
||||
#define H1_0 V27
|
||||
#define H2_0 V28
|
||||
#define H0_1 V29
|
||||
#define H1_1 V30
|
||||
#define H2_1 V31
|
||||
|
||||
GLOBL ·keyMask<>(SB), RODATA, $16
|
||||
DATA ·keyMask<>+0(SB)/8, $0xffffff0ffcffff0f
|
||||
DATA ·keyMask<>+8(SB)/8, $0xfcffff0ffcffff0f
|
||||
|
||||
GLOBL ·bswapMask<>(SB), RODATA, $16
|
||||
DATA ·bswapMask<>+0(SB)/8, $0x0f0e0d0c0b0a0908
|
||||
DATA ·bswapMask<>+8(SB)/8, $0x0706050403020100
|
||||
|
||||
GLOBL ·constants<>(SB), RODATA, $48
|
||||
// EX0
|
||||
DATA ·constants<>+0(SB)/8, $0x18191a1b1c1d1e1f
|
||||
DATA ·constants<>+8(SB)/8, $0x0000050403020100
|
||||
// EX1
|
||||
DATA ·constants<>+16(SB)/8, $0x18191a1b1c1d1e1f
|
||||
DATA ·constants<>+24(SB)/8, $0x00000a0908070605
|
||||
// EX2
|
||||
DATA ·constants<>+32(SB)/8, $0x18191a1b1c1d1e1f
|
||||
DATA ·constants<>+40(SB)/8, $0x0000000f0e0d0c0b
|
||||
|
||||
GLOBL ·c<>(SB), RODATA, $48
|
||||
// EX0
|
||||
DATA ·c<>+0(SB)/8, $0x0000050403020100
|
||||
DATA ·c<>+8(SB)/8, $0x0000151413121110
|
||||
// EX1
|
||||
DATA ·c<>+16(SB)/8, $0x00000a0908070605
|
||||
DATA ·c<>+24(SB)/8, $0x00001a1918171615
|
||||
// EX2
|
||||
DATA ·c<>+32(SB)/8, $0x0000000f0e0d0c0b
|
||||
DATA ·c<>+40(SB)/8, $0x0000001f1e1d1c1b
|
||||
|
||||
GLOBL ·reduce<>(SB), RODATA, $32
|
||||
// 44 bit
|
||||
DATA ·reduce<>+0(SB)/8, $0x0
|
||||
DATA ·reduce<>+8(SB)/8, $0xfffffffffff
|
||||
// 42 bit
|
||||
DATA ·reduce<>+16(SB)/8, $0x0
|
||||
DATA ·reduce<>+24(SB)/8, $0x3ffffffffff
|
||||
|
||||
// h = (f*g) % (2**130-5) [partial reduction]
// uses T_0...T_9 temporary registers
// input: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9
// output: m02_0, m02_1, m02_2, m13_0, m13_1, m13_2
#define MULTIPLY(m02_0, m02_1, m02_2, m13_0, m13_1, m13_2, r_0, r_1, r_2, r5_1, r5_2, m4_0, m4_1, m4_2, m5_0, m5_1, m5_2, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9) \
\ // Eliminate the dependency for the last 2 VMSLs
VMSLG m02_0, r_2, m4_2, m4_2 \
VMSLG m13_0, r_2, m5_2, m5_2 \ // 8 VMSLs pipelined
VMSLG m02_0, r_0, m4_0, m4_0 \
VMSLG m02_1, r5_2, V0, T_0 \
VMSLG m02_0, r_1, m4_1, m4_1 \
VMSLG m02_1, r_0, V0, T_1 \
VMSLG m02_1, r_1, V0, T_2 \
VMSLG m02_2, r5_1, V0, T_3 \
VMSLG m02_2, r5_2, V0, T_4 \
VMSLG m13_0, r_0, m5_0, m5_0 \
VMSLG m13_1, r5_2, V0, T_5 \
VMSLG m13_0, r_1, m5_1, m5_1 \
VMSLG m13_1, r_0, V0, T_6 \
VMSLG m13_1, r_1, V0, T_7 \
VMSLG m13_2, r5_1, V0, T_8 \
VMSLG m13_2, r5_2, V0, T_9 \
VMSLG m02_2, r_0, m4_2, m4_2 \
VMSLG m13_2, r_0, m5_2, m5_2 \
VAQ m4_0, T_0, m02_0 \
VAQ m4_1, T_1, m02_1 \
VAQ m5_0, T_5, m13_0 \
VAQ m5_1, T_6, m13_1 \
VAQ m02_0, T_3, m02_0 \
VAQ m02_1, T_4, m02_1 \
VAQ m13_0, T_8, m13_0 \
VAQ m13_1, T_9, m13_1 \
VAQ m4_2, T_2, m02_2 \
VAQ m5_2, T_7, m13_2 \

// SQUARE uses three limbs of r and r_2*5 to output square of r
// uses T_1, T_5 and T_7 temporary registers
// input: r_0, r_1, r_2, r5_2
// temp: TEMP0, TEMP1, TEMP2
// output: p0, p1, p2
#define SQUARE(r_0, r_1, r_2, r5_2, p0, p1, p2, TEMP0, TEMP1, TEMP2) \
VMSLG r_0, r_0, p0, p0 \
VMSLG r_1, r5_2, V0, TEMP0 \
VMSLG r_2, r5_2, p1, p1 \
VMSLG r_0, r_1, V0, TEMP1 \
VMSLG r_1, r_1, p2, p2 \
VMSLG r_0, r_2, V0, TEMP2 \
VAQ TEMP0, p0, p0 \
VAQ TEMP1, p1, p1 \
VAQ TEMP2, p2, p2 \
VAQ TEMP0, p0, p0 \
VAQ TEMP1, p1, p1 \
VAQ TEMP2, p2, p2 \

// carry h0->h1->h2->h0 || h3->h4->h5->h3
// uses T_2, T_4, T_5, T_7, T_8, T_9
// t6, t7, t8, t9, t10, t11
// input: h0, h1, h2, h3, h4, h5
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11
// output: h0, h1, h2, h3, h4, h5
#define REDUCE(h0, h1, h2, h3, h4, h5, t0, t1, t2, t3, t4, t5, t6, t7, t8, t9, t10, t11) \
VLM (R12), t6, t7 \ // 44 and 42 bit clear mask
VLEIB $7, $0x28, t10 \ // 5 byte shift mask
VREPIB $4, t8 \ // 4 bit shift mask
VREPIB $2, t11 \ // 2 bit shift mask
VSRLB t10, h0, t0 \ // h0 byte shift
VSRLB t10, h1, t1 \ // h1 byte shift
VSRLB t10, h2, t2 \ // h2 byte shift
VSRLB t10, h3, t3 \ // h3 byte shift
VSRLB t10, h4, t4 \ // h4 byte shift
VSRLB t10, h5, t5 \ // h5 byte shift
VSRL t8, t0, t0 \ // h0 bit shift
VSRL t8, t1, t1 \ // h2 bit shift
VSRL t11, t2, t2 \ // h2 bit shift
VSRL t8, t3, t3 \ // h3 bit shift
VSRL t8, t4, t4 \ // h4 bit shift
VESLG $2, t2, t9 \ // h2 carry x5
VSRL t11, t5, t5 \ // h5 bit shift
VN t6, h0, h0 \ // h0 clear carry
VAQ t2, t9, t2 \ // h2 carry x5
VESLG $2, t5, t9 \ // h5 carry x5
VN t6, h1, h1 \ // h1 clear carry
VN t7, h2, h2 \ // h2 clear carry
VAQ t5, t9, t5 \ // h5 carry x5
VN t6, h3, h3 \ // h3 clear carry
VN t6, h4, h4 \ // h4 clear carry
VN t7, h5, h5 \ // h5 clear carry
VAQ t0, h1, h1 \ // h0->h1
VAQ t3, h4, h4 \ // h3->h4
VAQ t1, h2, h2 \ // h1->h2
VAQ t4, h5, h5 \ // h4->h5
VAQ t2, h0, h0 \ // h2->h0
VAQ t5, h3, h3 \ // h5->h3
VREPG $1, t6, t6 \ // 44 and 42 bit masks across both halves
VREPG $1, t7, t7 \
VSLDB $8, h0, h0, h0 \ // set up [h0/1/2, h3/4/5]
VSLDB $8, h1, h1, h1 \
VSLDB $8, h2, h2, h2 \
VO h0, h3, h3 \
VO h1, h4, h4 \
VO h2, h5, h5 \
VESRLG $44, h3, t0 \ // 44 bit shift right
VESRLG $44, h4, t1 \
VESRLG $42, h5, t2 \
VN t6, h3, h3 \ // clear carry bits
VN t6, h4, h4 \
VN t7, h5, h5 \
VESLG $2, t2, t9 \ // multiply carry by 5
VAQ t9, t2, t2 \
VAQ t0, h4, h4 \
VAQ t1, h5, h5 \
VAQ t2, h3, h3 \

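Aside (not part of the vendored file): the "multiply carry by 5" steps in REDUCE above rely on the identity 2**130 mod (2**130-5) = 5, so any carry above the low 130 bits can be folded back in as carry*5. A minimal Go sketch, using math/big and an arbitrary test value, that checks this folding:

package main

import (
	"fmt"
	"math/big"
)

func main() {
	// p = 2**130 - 5, the Poly1305 prime.
	one := big.NewInt(1)
	p := new(big.Int).Lsh(one, 130)
	p.Sub(p, big.NewInt(5))

	// Arbitrary value wider than 130 bits.
	x, _ := new(big.Int).SetString("1fffffffffffffffffffffffffffffffff", 16)

	// Split x into the low 130 bits and the carry above them, then fold the
	// carry back in as carry*5 (because 2**130 is congruent to 5 mod p).
	mask := new(big.Int).Sub(new(big.Int).Lsh(one, 130), one)
	low := new(big.Int).And(x, mask)
	carry := new(big.Int).Rsh(x, 130)
	folded := new(big.Int).Add(low, carry.Mul(carry, big.NewInt(5)))

	// Both residues agree.
	fmt.Println(new(big.Int).Mod(folded, p).Cmp(new(big.Int).Mod(x, p)) == 0) // true
}

The assembly performs the same fold per 44/44/42-bit limb: VESLG $2 produces carry*4 and the following VAQ adds the original carry, giving carry*5.
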
// carry h0->h1->h2->h0
// input: h0, h1, h2
// temp: t0, t1, t2, t3, t4, t5, t6, t7, t8
// output: h0, h1, h2
#define REDUCE2(h0, h1, h2, t0, t1, t2, t3, t4, t5, t6, t7, t8) \
VLEIB $7, $0x28, t3 \ // 5 byte shift mask
VREPIB $4, t4 \ // 4 bit shift mask
VREPIB $2, t7 \ // 2 bit shift mask
VGBM $0x003F, t5 \ // mask to clear carry bits
VSRLB t3, h0, t0 \
VSRLB t3, h1, t1 \
VSRLB t3, h2, t2 \
VESRLG $4, t5, t5 \ // 44 bit clear mask
VSRL t4, t0, t0 \
VSRL t4, t1, t1 \
VSRL t7, t2, t2 \
VESRLG $2, t5, t6 \ // 42 bit clear mask
VESLG $2, t2, t8 \
VAQ t8, t2, t2 \
VN t5, h0, h0 \
VN t5, h1, h1 \
VN t6, h2, h2 \
VAQ t0, h1, h1 \
VAQ t1, h2, h2 \
VAQ t2, h0, h0 \
VSRLB t3, h0, t0 \
VSRLB t3, h1, t1 \
VSRLB t3, h2, t2 \
VSRL t4, t0, t0 \
VSRL t4, t1, t1 \
VSRL t7, t2, t2 \
VN t5, h0, h0 \
VN t5, h1, h1 \
VESLG $2, t2, t8 \
VN t6, h2, h2 \
VAQ t0, h1, h1 \
VAQ t8, t2, t2 \
VAQ t1, h2, h2 \
VAQ t2, h0, h0 \

// expands two message blocks into the lower halfs of the d registers
// moves the contents of the d registers into upper halfs
// input: in1, in2, d0, d1, d2, d3, d4, d5
// temp: TEMP0, TEMP1, TEMP2, TEMP3
// output: d0, d1, d2, d3, d4, d5
#define EXPACC(in1, in2, d0, d1, d2, d3, d4, d5, TEMP0, TEMP1, TEMP2, TEMP3) \
VGBM $0xff3f, TEMP0 \
VGBM $0xff1f, TEMP1 \
VESLG $4, d1, TEMP2 \
VESLG $4, d4, TEMP3 \
VESRLG $4, TEMP0, TEMP0 \
VPERM in1, d0, EX0, d0 \
VPERM in2, d3, EX0, d3 \
VPERM in1, d2, EX2, d2 \
VPERM in2, d5, EX2, d5 \
VPERM in1, TEMP2, EX1, d1 \
VPERM in2, TEMP3, EX1, d4 \
VN TEMP0, d0, d0 \
VN TEMP0, d3, d3 \
VESRLG $4, d1, d1 \
VESRLG $4, d4, d4 \
VN TEMP1, d2, d2 \
VN TEMP1, d5, d5 \
VN TEMP0, d1, d1 \
VN TEMP0, d4, d4 \

// expands one message block into the lower halfs of the d registers
// moves the contents of the d registers into upper halfs
// input: in, d0, d1, d2
// temp: TEMP0, TEMP1, TEMP2
// output: d0, d1, d2
#define EXPACC2(in, d0, d1, d2, TEMP0, TEMP1, TEMP2) \
VGBM $0xff3f, TEMP0 \
VESLG $4, d1, TEMP2 \
VGBM $0xff1f, TEMP1 \
VPERM in, d0, EX0, d0 \
VESRLG $4, TEMP0, TEMP0 \
VPERM in, d2, EX2, d2 \
VPERM in, TEMP2, EX1, d1 \
VN TEMP0, d0, d0 \
VN TEMP1, d2, d2 \
VESRLG $4, d1, d1 \
VN TEMP0, d1, d1 \

// pack h2:h0 into h1:h0 (no carry)
// input: h0, h1, h2
// output: h0, h1, h2
#define PACK(h0, h1, h2) \
VMRLG h1, h2, h2 \ // copy h1 to upper half h2
VESLG $44, h1, h1 \ // shift limb 1 44 bits, leaving 20
VO h0, h1, h0 \ // combine h0 with 20 bits from limb 1
VESRLG $20, h2, h1 \ // put top 24 bits of limb 1 into h1
VLEIG $1, $0, h1 \ // clear h2 stuff from lower half of h1
VO h0, h1, h0 \ // h0 now has 88 bits (limb 0 and 1)
VLEIG $0, $0, h2 \ // clear upper half of h2
VESRLG $40, h2, h1 \ // h1 now has upper two bits of result
VLEIB $7, $88, h1 \ // for byte shift (11 bytes)
VSLB h1, h2, h2 \ // shift h2 11 bytes to the left
VO h0, h2, h0 \ // combine h0 with 20 bits from limb 1
VLEIG $0, $0, h1 \ // clear upper half of h1

// if h > 2**130-5 then h -= 2**130-5
// input: h0, h1
// temp: t0, t1, t2
// output: h0
#define MOD(h0, h1, t0, t1, t2) \
VZERO t0 \
VLEIG $1, $5, t0 \
VACCQ h0, t0, t1 \
VAQ h0, t0, t0 \
VONE t2 \
VLEIG $1, $-4, t2 \
VAQ t2, t1, t1 \
VACCQ h1, t1, t1 \
VONE t2 \
VAQ t2, t1, t1 \
VN h0, t1, t2 \
VNC t0, t1, t1 \
VO t1, t2, h0 \

// func poly1305vmsl(out *[16]byte, m *byte, mlen uint64, key *[32]key)
TEXT ·poly1305vmsl(SB), $0-32
// This code processes 6 + up to 4 blocks (32 bytes) per iteration
// using the algorithm described in:
// NEON crypto, Daniel J. Bernstein & Peter Schwabe
// https://cryptojedi.org/papers/neoncrypto-20120320.pdf
// And as moddified for VMSL as described in
// Accelerating Poly1305 Cryptographic Message Authentication on the z14
// O'Farrell et al, CASCON 2017, p48-55
// https://ibm.ent.box.com/s/jf9gedj0e9d2vjctfyh186shaztavnht

LMG out+0(FP), R1, R4 // R1=out, R2=m, R3=mlen, R4=key
|
||||
VZERO V0 // c
|
||||
|
||||
// load EX0, EX1 and EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), EX0, EX2 // c
|
||||
|
||||
// setup r
|
||||
VL (R4), T_0
|
||||
MOVD $·keyMask<>(SB), R6
|
||||
VL (R6), T_1
|
||||
VN T_0, T_1, T_0
|
||||
VZERO T_2 // limbs for r
|
||||
VZERO T_3
|
||||
VZERO T_4
|
||||
EXPACC2(T_0, T_2, T_3, T_4, T_1, T_5, T_7)
|
||||
|
||||
// T_2, T_3, T_4: [0, r]
|
||||
|
||||
// setup r*20
|
||||
VLEIG $0, $0, T_0
|
||||
VLEIG $1, $20, T_0 // T_0: [0, 20]
|
||||
VZERO T_5
|
||||
VZERO T_6
|
||||
VMSLG T_0, T_3, T_5, T_5
|
||||
VMSLG T_0, T_4, T_6, T_6
|
||||
|
||||
// store r for final block in GR
|
||||
VLGVG $1, T_2, RSAVE_0 // c
|
||||
VLGVG $1, T_3, RSAVE_1 // c
|
||||
VLGVG $1, T_4, RSAVE_2 // c
|
||||
VLGVG $1, T_5, R5SAVE_1 // c
|
||||
VLGVG $1, T_6, R5SAVE_2 // c
|
||||
|
||||
// initialize h
|
||||
VZERO H0_0
|
||||
VZERO H1_0
|
||||
VZERO H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
// initialize pointer for reduce constants
|
||||
MOVD $·reduce<>(SB), R12
|
||||
|
||||
// calculate r**2 and 20*(r**2)
|
||||
VZERO R_0
|
||||
VZERO R_1
|
||||
VZERO R_2
|
||||
SQUARE(T_2, T_3, T_4, T_6, R_0, R_1, R_2, T_1, T_5, T_7)
|
||||
REDUCE2(R_0, R_1, R_2, M0, M1, M2, M3, M4, R5_1, R5_2, M5, T_1)
|
||||
VZERO R5_1
|
||||
VZERO R5_2
|
||||
VMSLG T_0, R_1, R5_1, R5_1
|
||||
VMSLG T_0, R_2, R5_2, R5_2
|
||||
|
||||
// skip r**4 calculation if 3 blocks or less
|
||||
CMPBLE R3, $48, b4
|
||||
|
||||
// calculate r**4 and 20*(r**4)
|
||||
VZERO T_8
|
||||
VZERO T_9
|
||||
VZERO T_10
|
||||
SQUARE(R_0, R_1, R_2, R5_2, T_8, T_9, T_10, T_1, T_5, T_7)
|
||||
REDUCE2(T_8, T_9, T_10, M0, M1, M2, M3, M4, T_2, T_3, M5, T_1)
|
||||
VZERO T_2
|
||||
VZERO T_3
|
||||
VMSLG T_0, T_9, T_2, T_2
|
||||
VMSLG T_0, T_10, T_3, T_3
|
||||
|
||||
// put r**2 to the right and r**4 to the left of R_0, R_1, R_2
|
||||
VSLDB $8, T_8, T_8, T_8
|
||||
VSLDB $8, T_9, T_9, T_9
|
||||
VSLDB $8, T_10, T_10, T_10
|
||||
VSLDB $8, T_2, T_2, T_2
|
||||
VSLDB $8, T_3, T_3, T_3
|
||||
|
||||
VO T_8, R_0, R_0
|
||||
VO T_9, R_1, R_1
|
||||
VO T_10, R_2, R_2
|
||||
VO T_2, R5_1, R5_1
|
||||
VO T_3, R5_2, R5_2
|
||||
|
||||
CMPBLE R3, $80, load // less than or equal to 5 blocks in message
|
||||
|
||||
// 6(or 5+1) blocks
|
||||
SUB $81, R3
|
||||
VLM (R2), M0, M4
|
||||
VLL R3, 80(R2), M5
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBGE R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M5
|
||||
MOVD $96(R2), R2
|
||||
EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
|
||||
EXPACC(M2, M3, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
|
||||
VLEIB $2, $1, H2_0
|
||||
VLEIB $2, $1, H2_1
|
||||
VLEIB $10, $1, H2_0
|
||||
VLEIB $10, $1, H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO T_4
|
||||
VZERO T_10
|
||||
EXPACC(M4, M5, M0, M1, M2, M3, T_4, T_10, T_0, T_1, T_2, T_3)
|
||||
VLR T_4, M4
|
||||
VLEIB $10, $1, M2
|
||||
CMPBLT R3, $16, 2(PC)
|
||||
VLEIB $10, $1, T_10
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
|
||||
SUB $16, R3
|
||||
CMPBLE R3, $0, square
|
||||
|
||||
load:
|
||||
// load EX0, EX1 and EX2
|
||||
MOVD $·c<>(SB), R5
|
||||
VLM (R5), EX0, EX2
|
||||
|
||||
loop:
|
||||
CMPBLE R3, $64, add // b4 // last 4 or less blocks left
|
||||
|
||||
// next 4 full blocks
|
||||
VLM (R2), M2, M5
|
||||
SUB $64, R3
|
||||
MOVD $64(R2), R2
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, T_0, T_1, T_3, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
|
||||
// expacc in-lined to create [m2, m3] limbs
|
||||
VGBM $0x3f3f, T_0 // 44 bit clear mask
|
||||
VGBM $0x1f1f, T_1 // 40 bit clear mask
|
||||
VPERM M2, M3, EX0, T_3
|
||||
VESRLG $4, T_0, T_0 // 44 bit clear mask ready
|
||||
VPERM M2, M3, EX1, T_4
|
||||
VPERM M2, M3, EX2, T_5
|
||||
VN T_0, T_3, T_3
|
||||
VESRLG $4, T_4, T_4
|
||||
VN T_1, T_5, T_5
|
||||
VN T_0, T_4, T_4
|
||||
VMRHG H0_1, T_3, H0_0
|
||||
VMRHG H1_1, T_4, H1_0
|
||||
VMRHG H2_1, T_5, H2_0
|
||||
VMRLG H0_1, T_3, H0_1
|
||||
VMRLG H1_1, T_4, H1_1
|
||||
VMRLG H2_1, T_5, H2_1
|
||||
VLEIB $10, $1, H2_0
|
||||
VLEIB $10, $1, H2_1
|
||||
VPERM M4, M5, EX0, T_3
|
||||
VPERM M4, M5, EX1, T_4
|
||||
VPERM M4, M5, EX2, T_5
|
||||
VN T_0, T_3, T_3
|
||||
VESRLG $4, T_4, T_4
|
||||
VN T_1, T_5, T_5
|
||||
VN T_0, T_4, T_4
|
||||
VMRHG V0, T_3, M0
|
||||
VMRHG V0, T_4, M1
|
||||
VMRHG V0, T_5, M2
|
||||
VMRLG V0, T_3, M3
|
||||
VMRLG V0, T_4, M4
|
||||
VMRLG V0, T_5, M5
|
||||
VLEIB $10, $1, M2
|
||||
VLEIB $10, $1, M5
|
||||
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
CMPBNE R3, $0, loop
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
|
||||
// load EX0, EX1, EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), EX0, EX2
|
||||
|
||||
// sum vectors
|
||||
VAQ H0_0, H0_1, H0_0
|
||||
VAQ H1_0, H1_1, H1_0
|
||||
VAQ H2_0, H2_1, H2_0
|
||||
|
||||
// h may be >= 2*(2**130-5) so we need to reduce it again
|
||||
// M0...M4 are used as temps here
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
|
||||
|
||||
next: // carry h1->h2
|
||||
VLEIB $7, $0x28, T_1
|
||||
VREPIB $4, T_2
|
||||
VGBM $0x003F, T_3
|
||||
VESRLG $4, T_3
|
||||
|
||||
// byte shift
|
||||
VSRLB T_1, H1_0, T_4
|
||||
|
||||
// bit shift
|
||||
VSRL T_2, T_4, T_4
|
||||
|
||||
// clear h1 carry bits
|
||||
VN T_3, H1_0, H1_0
|
||||
|
||||
// add carry
|
||||
VAQ T_4, H2_0, H2_0
|
||||
|
||||
// h is now < 2*(2**130-5)
|
||||
// pack h into h1 (hi) and h0 (lo)
|
||||
PACK(H0_0, H1_0, H2_0)
|
||||
|
||||
// if h > 2**130-5 then h -= 2**130-5
|
||||
MOD(H0_0, H1_0, T_0, T_1, T_2)
|
||||
|
||||
// h += s
|
||||
MOVD $·bswapMask<>(SB), R5
|
||||
VL (R5), T_1
|
||||
VL 16(R4), T_0
|
||||
VPERM T_0, T_0, T_1, T_0 // reverse bytes (to big)
|
||||
VAQ T_0, H0_0, H0_0
|
||||
VPERM H0_0, H0_0, T_1, H0_0 // reverse bytes (to little)
|
||||
VST H0_0, (R1)
|
||||
RET
|
||||
|
||||
add:
|
||||
// load EX0, EX1, EX2
|
||||
MOVD $·constants<>(SB), R5
|
||||
VLM (R5), EX0, EX2
|
||||
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
CMPBLE R3, $64, b4
|
||||
|
||||
b4:
|
||||
CMPBLE R3, $48, b3 // 3 blocks or less
|
||||
|
||||
// 4(3+1) blocks remaining
|
||||
SUB $49, R3
|
||||
VLM (R2), M0, M2
|
||||
VLL R3, 48(R2), M3
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M3
|
||||
MOVD $64(R2), R2
|
||||
EXPACC(M0, M1, H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_0, T_1, T_2, T_3)
|
||||
VLEIB $10, $1, H2_0
|
||||
VLEIB $10, $1, H2_1
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
VZERO T_4
|
||||
VZERO T_10
|
||||
EXPACC(M2, M3, M0, M1, M4, M5, T_4, T_10, T_0, T_1, T_2, T_3)
|
||||
VLR T_4, M2
|
||||
VLEIB $10, $1, M4
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $10, $1, T_10
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M4, M5, M2, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M3, M4, M5, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
SUB $16, R3
|
||||
CMPBLE R3, $0, square // this condition must always hold true!
|
||||
|
||||
b3:
|
||||
CMPBLE R3, $32, b2
|
||||
|
||||
// 3 blocks remaining
|
||||
|
||||
// setup [r²,r]
|
||||
VSLDB $8, R_0, R_0, R_0
|
||||
VSLDB $8, R_1, R_1, R_1
|
||||
VSLDB $8, R_2, R_2, R_2
|
||||
VSLDB $8, R5_1, R5_1, R5_1
|
||||
VSLDB $8, R5_2, R5_2, R5_2
|
||||
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, R5SAVE_1, R5_1
|
||||
VLVGG $1, R5SAVE_2, R5_2
|
||||
|
||||
// setup [h0, h1]
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
VO H0_1, H0_0, H0_0
|
||||
VO H1_1, H1_0, H1_0
|
||||
VO H2_1, H2_0, H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
|
||||
// H*[r**2, r]
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, T_10, M5)
|
||||
|
||||
SUB $33, R3
|
||||
VLM (R2), M0, M1
|
||||
VLL R3, 32(R2), M2
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M2
|
||||
|
||||
// H += m0
|
||||
VZERO T_1
|
||||
VZERO T_2
|
||||
VZERO T_3
|
||||
EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)
|
||||
VLEIB $10, $1, T_3
|
||||
VAG H0_0, T_1, H0_0
|
||||
VAG H1_0, T_2, H1_0
|
||||
VAG H2_0, T_3, H2_0
|
||||
|
||||
VZERO M0
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
VZERO T_10
|
||||
|
||||
// (H+m0)*r
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M3, M4, M5, V0, T_10, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_10, H0_1, H1_1, H2_1, T_9)
|
||||
|
||||
// H += m1
|
||||
VZERO V0
|
||||
VZERO T_1
|
||||
VZERO T_2
|
||||
VZERO T_3
|
||||
EXPACC2(M1, T_1, T_2, T_3, T_4, T_5, T_6)
|
||||
VLEIB $10, $1, T_3
|
||||
VAQ H0_0, T_1, H0_0
|
||||
VAQ H1_0, T_2, H1_0
|
||||
VAQ H2_0, T_3, H2_0
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
|
||||
|
||||
// [H, m2] * [r**2, r]
|
||||
EXPACC2(M2, H0_0, H1_0, H2_0, T_1, T_2, T_3)
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $10, $1, H2_0
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, H0_1, H1_1, M5, T_10)
|
||||
SUB $16, R3
|
||||
CMPBLE R3, $0, next // this condition must always hold true!
|
||||
|
||||
b2:
|
||||
CMPBLE R3, $16, b1
|
||||
|
||||
// 2 blocks remaining
|
||||
|
||||
// setup [r²,r]
|
||||
VSLDB $8, R_0, R_0, R_0
|
||||
VSLDB $8, R_1, R_1, R_1
|
||||
VSLDB $8, R_2, R_2, R_2
|
||||
VSLDB $8, R5_1, R5_1, R5_1
|
||||
VSLDB $8, R5_2, R5_2, R5_2
|
||||
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, R5SAVE_1, R5_1
|
||||
VLVGG $1, R5SAVE_2, R5_2
|
||||
|
||||
// setup [h0, h1]
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
VO H0_1, H0_0, H0_0
|
||||
VO H1_1, H1_0, H1_0
|
||||
VO H2_1, H2_0, H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
|
||||
// H*[r**2, r]
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, T_10, M0, M1, M2, M3, M4, T_4, T_5, T_2, T_7, T_8, T_9)
|
||||
VMRHG V0, H0_1, H0_0
|
||||
VMRHG V0, H1_1, H1_0
|
||||
VMRHG V0, H2_1, H2_0
|
||||
VMRLG V0, H0_1, H0_1
|
||||
VMRLG V0, H1_1, H1_1
|
||||
VMRLG V0, H2_1, H2_1
|
||||
|
||||
// move h to the left and 0s at the right
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
|
||||
// get message blocks and append 1 to start
|
||||
SUB $17, R3
|
||||
VL (R2), M0
|
||||
VLL R3, 16(R2), M1
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M1
|
||||
VZERO T_6
|
||||
VZERO T_7
|
||||
VZERO T_8
|
||||
EXPACC2(M0, T_6, T_7, T_8, T_1, T_2, T_3)
|
||||
EXPACC2(M1, T_6, T_7, T_8, T_1, T_2, T_3)
|
||||
VLEIB $2, $1, T_8
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $10, $1, T_8
|
||||
|
||||
// add [m0, m1] to h
|
||||
VAG H0_0, T_6, H0_0
|
||||
VAG H1_0, T_7, H1_0
|
||||
VAG H2_0, T_8, H2_0
|
||||
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
VZERO T_10
|
||||
VZERO M0
|
||||
|
||||
// at this point R_0 .. R5_2 look like [r**2, r]
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M2, M3, M4, M5, T_10, M0, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M2, M3, M4, M5, T_9, H0_1, H1_1, H2_1, T_10)
|
||||
SUB $16, R3, R3
|
||||
CMPBLE R3, $0, next
|
||||
|
||||
b1:
|
||||
CMPBLE R3, $0, next
|
||||
|
||||
// 1 block remaining
|
||||
|
||||
// setup [r²,r]
|
||||
VSLDB $8, R_0, R_0, R_0
|
||||
VSLDB $8, R_1, R_1, R_1
|
||||
VSLDB $8, R_2, R_2, R_2
|
||||
VSLDB $8, R5_1, R5_1, R5_1
|
||||
VSLDB $8, R5_2, R5_2, R5_2
|
||||
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, R5SAVE_1, R5_1
|
||||
VLVGG $1, R5SAVE_2, R5_2
|
||||
|
||||
// setup [h0, h1]
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
VO H0_1, H0_0, H0_0
|
||||
VO H1_1, H1_0, H1_0
|
||||
VO H2_1, H2_0, H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
|
||||
// H*[r**2, r]
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
|
||||
|
||||
// set up [0, m0] limbs
|
||||
SUB $1, R3
|
||||
VLL R3, (R2), M0
|
||||
ADD $1, R3
|
||||
MOVBZ $1, R0
|
||||
CMPBEQ R3, $16, 2(PC)
|
||||
VLVGB R3, R0, M0
|
||||
VZERO T_1
|
||||
VZERO T_2
|
||||
VZERO T_3
|
||||
EXPACC2(M0, T_1, T_2, T_3, T_4, T_5, T_6)// limbs: [0, m]
|
||||
CMPBNE R3, $16, 2(PC)
|
||||
VLEIB $10, $1, T_3
|
||||
|
||||
// h+m0
|
||||
VAQ H0_0, T_1, H0_0
|
||||
VAQ H1_0, T_2, H1_0
|
||||
VAQ H2_0, T_3, H2_0
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
|
||||
|
||||
BR next
|
||||
|
||||
square:
|
||||
// setup [r²,r]
|
||||
VSLDB $8, R_0, R_0, R_0
|
||||
VSLDB $8, R_1, R_1, R_1
|
||||
VSLDB $8, R_2, R_2, R_2
|
||||
VSLDB $8, R5_1, R5_1, R5_1
|
||||
VSLDB $8, R5_2, R5_2, R5_2
|
||||
|
||||
VLVGG $1, RSAVE_0, R_0
|
||||
VLVGG $1, RSAVE_1, R_1
|
||||
VLVGG $1, RSAVE_2, R_2
|
||||
VLVGG $1, R5SAVE_1, R5_1
|
||||
VLVGG $1, R5SAVE_2, R5_2
|
||||
|
||||
// setup [h0, h1]
|
||||
VSLDB $8, H0_0, H0_0, H0_0
|
||||
VSLDB $8, H1_0, H1_0, H1_0
|
||||
VSLDB $8, H2_0, H2_0, H2_0
|
||||
VO H0_1, H0_0, H0_0
|
||||
VO H1_1, H1_0, H1_0
|
||||
VO H2_1, H2_0, H2_0
|
||||
VZERO H0_1
|
||||
VZERO H1_1
|
||||
VZERO H2_1
|
||||
|
||||
VZERO M0
|
||||
VZERO M1
|
||||
VZERO M2
|
||||
VZERO M3
|
||||
VZERO M4
|
||||
VZERO M5
|
||||
|
||||
// (h0*r**2) + (h1*r)
|
||||
MULTIPLY(H0_0, H1_0, H2_0, H0_1, H1_1, H2_1, R_0, R_1, R_2, R5_1, R5_2, M0, M1, M2, M3, M4, M5, T_0, T_1, T_2, T_3, T_4, T_5, T_6, T_7, T_8, T_9)
|
||||
REDUCE2(H0_0, H1_0, H2_0, M0, M1, M2, M3, M4, T_9, T_10, H0_1, M5)
|
||||
BR next
|
4
vendor/golang.org/x/crypto/ssh/certs.go
generated
vendored
@@ -414,8 +414,8 @@ func (c *CertChecker) CheckCert(principal string, cert *Certificate) error {
return nil
}

// SignCert sets c.SignatureKey to the authority's public key and stores a
// Signature, by authority, in the certificate.
// SignCert signs the certificate with an authority, setting the Nonce,
// SignatureKey, and Signature fields.
func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
c.Nonce = make([]byte, 32)
if _, err := io.ReadFull(rand, c.Nonce); err != nil {

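Aside (illustrative only, not part of this change): a minimal sketch of how a caller uses SignCert as described by the updated comment; the keys and certificate fields below are hypothetical.

package main

import (
	"crypto/ed25519"
	"crypto/rand"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Hypothetical CA and user keys, generated only for illustration.
	_, caPriv, _ := ed25519.GenerateKey(rand.Reader)
	userPub, _, _ := ed25519.GenerateKey(rand.Reader)

	caSigner, err := ssh.NewSignerFromKey(caPriv)
	if err != nil {
		log.Fatal(err)
	}
	pub, err := ssh.NewPublicKey(userPub)
	if err != nil {
		log.Fatal(err)
	}

	cert := &ssh.Certificate{
		Key:             pub,
		CertType:        ssh.UserCert,
		KeyId:           "example-user",
		ValidPrincipals: []string{"alice"},
		ValidBefore:     ssh.CertTimeInfinity,
	}
	// As the updated comment says, SignCert fills in Nonce, SignatureKey,
	// and Signature using the authority's signer.
	if err := cert.SignCert(rand.Reader, caSigner); err != nil {
		log.Fatal(err)
	}
	log.Printf("cert type: %s", cert.Type())
}
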
2
vendor/golang.org/x/crypto/ssh/cipher.go
generated
vendored
@@ -119,7 +119,7 @@ var cipherModes = map[string]*cipherMode{
chacha20Poly1305ID: {64, 0, newChaCha20Cipher},

// CBC mode is insecure and so is not included in the default config.
// (See http://www.isg.rhul.ac.uk/~kp/SandPfinal.pdf). If absolutely
// (See https://www.ieee-security.org/TC/SP2013/papers/4977a526.pdf). If absolutely
// needed, it's possible to specify a custom Config to enable it.
// You should expect that an active attacker can recover plaintext if
// you do.

22
vendor/golang.org/x/crypto/ssh/client_auth.go
generated
vendored
@@ -36,7 +36,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {

// during the authentication phase the client first attempts the "none" method
// then any untried methods suggested by the server.
tried := make(map[string]bool)
var tried []string
var lastMethods []string

sessionID := c.transport.getSessionID()
@@ -49,7 +49,9 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
// success
return nil
} else if ok == authFailure {
tried[auth.method()] = true
if m := auth.method(); !contains(tried, m) {
tried = append(tried, m)
}
}
if methods == nil {
methods = lastMethods
@@ -61,7 +63,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
findNext:
for _, a := range config.Auth {
candidateMethod := a.method()
if tried[candidateMethod] {
if contains(tried, candidateMethod) {
continue
}
for _, meth := range methods {
@@ -72,16 +74,16 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
}
}
}
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", keys(tried))
return fmt.Errorf("ssh: unable to authenticate, attempted methods %v, no supported methods remain", tried)
}

func keys(m map[string]bool) []string {
s := make([]string, 0, len(m))

for key := range m {
s = append(s, key)
func contains(list []string, e string) bool {
for _, s := range list {
if s == e {
return true
}
}
return s
return false
}

// An AuthMethod represents an instance of an RFC 4252 authentication method.

4
vendor/golang.org/x/crypto/ssh/kex.go
generated
vendored
@@ -572,7 +572,7 @@ func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, e
return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil
}

func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
// Send GexRequest
kexDHGexRequest := kexDHGexRequestMsg{
MinBits: dhGroupExchangeMinimumBits,
@@ -677,7 +677,7 @@ func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshak
// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256.
//
// This is a minimal implementation to satisfy the automated tests.
func (gex *dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
// Receive GexRequest
packet, err := c.readPacket()
if err != nil {

18
vendor/golang.org/x/crypto/ssh/keys.go
generated
vendored
@@ -1246,15 +1246,23 @@ func passphraseProtectedOpenSSHKey(passphrase []byte) openSSHDecryptFunc {
}
key, iv := k[:32], k[32:]

if cipherName != "aes256-ctr" {
return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q", cipherName, "aes256-ctr")
}
c, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
ctr := cipher.NewCTR(c, iv)
ctr.XORKeyStream(privKeyBlock, privKeyBlock)
switch cipherName {
case "aes256-ctr":
ctr := cipher.NewCTR(c, iv)
ctr.XORKeyStream(privKeyBlock, privKeyBlock)
case "aes256-cbc":
if len(privKeyBlock)%c.BlockSize() != 0 {
return nil, fmt.Errorf("ssh: invalid encrypted private key length, not a multiple of the block size")
}
cbc := cipher.NewCBCDecrypter(c, iv)
cbc.CryptBlocks(privKeyBlock, privKeyBlock)
default:
return nil, fmt.Errorf("ssh: unknown cipher %q, only supports %q or %q", cipherName, "aes256-ctr", "aes256-cbc")
}

return privKeyBlock, nil
}

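Aside (illustrative only, not part of this change): with the switch above, passphrase-protected OpenSSH keys encrypted with either aes256-ctr or aes256-cbc decrypt through the same entry point. A minimal sketch, where the key file path and passphrase are placeholders:

package main

import (
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Path and passphrase are placeholders for illustration only.
	pemBytes, err := os.ReadFile("id_ed25519")
	if err != nil {
		log.Fatal(err)
	}
	// Decrypts aes256-ctr and, after this change, aes256-cbc protected keys;
	// other ciphers still return an error.
	signer, err := ssh.ParsePrivateKeyWithPassphrase(pemBytes, []byte("passphrase"))
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("loaded key type %s", signer.PublicKey().Type())
}
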
23
vendor/golang.org/x/crypto/ssh/mux.go
generated
vendored
@@ -240,7 +240,7 @@ func (m *mux) onePacket() error {
id := binary.BigEndian.Uint32(packet[1:])
ch := m.chanList.getChan(id)
if ch == nil {
return fmt.Errorf("ssh: invalid channel %d", id)
return m.handleUnknownChannelPacket(id, packet)
}

return ch.handlePacket(packet)
@@ -328,3 +328,24 @@ func (m *mux) openChannel(chanType string, extra []byte) (*channel, error) {
return nil, fmt.Errorf("ssh: unexpected packet in response to channel open: %T", msg)
}
}

func (m *mux) handleUnknownChannelPacket(id uint32, packet []byte) error {
msg, err := decode(packet)
if err != nil {
return err
}

switch msg := msg.(type) {
// RFC 4254 section 5.4 says unrecognized channel requests should
// receive a failure response.
case *channelRequestMsg:
if msg.WantReply {
return m.sendMessage(channelRequestFailureMsg{
PeersID: msg.PeersID,
})
}
return nil
default:
return fmt.Errorf("ssh: invalid channel %d", id)
}
}

8
vendor/golang.org/x/crypto/ssh/terminal/terminal.go
generated
vendored
@@ -113,6 +113,7 @@ func NewTerminal(c io.ReadWriter, prompt string) *Terminal {
}

const (
keyCtrlC = 3
keyCtrlD = 4
keyCtrlU = 21
keyEnter = '\r'
@@ -151,8 +152,12 @@ func bytesToKey(b []byte, pasteActive bool) (rune, []byte) {
switch b[0] {
case 1: // ^A
return keyHome, b[1:]
case 2: // ^B
return keyLeft, b[1:]
case 5: // ^E
return keyEnd, b[1:]
case 6: // ^F
return keyRight, b[1:]
case 8: // ^H
return keyBackspace, b[1:]
case 11: // ^K
@@ -738,6 +743,9 @@ func (t *Terminal) readLine() (line string, err error) {
return "", io.EOF
}
}
if key == keyCtrlC {
return "", io.EOF
}
if key == keyPasteStart {
t.pasteActive = true
if len(t.line) == 0 {

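Aside (illustrative only, not part of this change): after this patch, ReadLine reports Ctrl-C the same way it already reported Ctrl-D, by returning io.EOF. A minimal caller sketch, assuming plain stdin/stdout wiring (a real program would normally also switch the terminal to raw mode, e.g. with terminal.MakeRaw):

package main

import (
	"fmt"
	"io"
	"os"

	"golang.org/x/crypto/ssh/terminal"
)

// stdio is a throwaway io.ReadWriter over the process's stdin/stdout,
// used only to give NewTerminal something to talk to.
type stdio struct{}

func (stdio) Read(p []byte) (int, error)  { return os.Stdin.Read(p) }
func (stdio) Write(p []byte) (int, error) { return os.Stdout.Write(p) }

func main() {
	t := terminal.NewTerminal(stdio{}, "> ")
	for {
		line, err := t.ReadLine()
		if err == io.EOF { // returned for Ctrl-D and, with this change, Ctrl-C
			return
		}
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			return
		}
		fmt.Println("you typed:", line)
	}
}
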
27
vendor/golang.org/x/mod/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/mod/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
Additional IP Rights Grant (Patents)

"This implementation" means the copyrightable works distributed by
Google as part of the Go project.

Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

27
vendor/golang.org/x/mod/module/BUILD
generated
vendored
Normal file
@@ -0,0 +1,27 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["module.go"],
importmap = "k8s.io/kubernetes/vendor/golang.org/x/mod/module",
importpath = "golang.org/x/mod/module",
visibility = ["//visibility:public"],
deps = [
"//vendor/golang.org/x/mod/semver:go_default_library",
"//vendor/golang.org/x/xerrors:go_default_library",
],
)

filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)

filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

@@ -2,8 +2,86 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package module defines the module.Version type
|
||||
// along with support code.
|
||||
// Package module defines the module.Version type along with support code.
|
||||
//
|
||||
// The module.Version type is a simple Path, Version pair:
|
||||
//
|
||||
// type Version struct {
|
||||
// Path string
|
||||
// Version string
|
||||
// }
|
||||
//
|
||||
// There are no restrictions imposed directly by use of this structure,
|
||||
// but additional checking functions, most notably Check, verify that
|
||||
// a particular path, version pair is valid.
|
||||
//
|
||||
// Escaped Paths
|
||||
//
|
||||
// Module paths appear as substrings of file system paths
|
||||
// (in the download cache) and of web server URLs in the proxy protocol.
|
||||
// In general we cannot rely on file systems to be case-sensitive,
|
||||
// nor can we rely on web servers, since they read from file systems.
|
||||
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
|
||||
// and rsc.io/quote separate. Windows and macOS don't.
|
||||
// Instead, we must never require two different casings of a file path.
|
||||
// Because we want the download cache to match the proxy protocol,
|
||||
// and because we want the proxy protocol to be possible to serve
|
||||
// from a tree of static files (which might be stored on a case-insensitive
|
||||
// file system), the proxy protocol must never require two different casings
|
||||
// of a URL path either.
|
||||
//
|
||||
// One possibility would be to make the escaped form be the lowercase
|
||||
// hexadecimal encoding of the actual path bytes. This would avoid ever
|
||||
// needing different casings of a file path, but it would be fairly illegible
|
||||
// to most programmers when those paths appeared in the file system
|
||||
// (including in file paths in compiler errors and stack traces)
|
||||
// in web server logs, and so on. Instead, we want a safe escaped form that
|
||||
// leaves most paths unaltered.
|
||||
//
|
||||
// The safe escaped form is to replace every uppercase letter
|
||||
// with an exclamation mark followed by the letter's lowercase equivalent.
|
||||
//
|
||||
// For example,
|
||||
//
|
||||
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
|
||||
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
|
||||
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
|
||||
//
|
||||
// Import paths that avoid upper-case letters are left unchanged.
|
||||
// Note that because import paths are ASCII-only and avoid various
|
||||
// problematic punctuation (like : < and >), the escaped form is also ASCII-only
|
||||
// and avoids the same problematic punctuation.
|
||||
//
|
||||
// Import paths have never allowed exclamation marks, so there is no
|
||||
// need to define how to escape a literal !.
|
||||
//
|
||||
// Unicode Restrictions
|
||||
//
|
||||
// Today, paths are disallowed from using Unicode.
|
||||
//
|
||||
// Although paths are currently disallowed from using Unicode,
|
||||
// we would like at some point to allow Unicode letters as well, to assume that
|
||||
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
|
||||
// the !-for-uppercase convention for escaping them in the file system.
|
||||
// But there are at least two subtle considerations.
|
||||
//
|
||||
// First, note that not all case-fold equivalent distinct runes
|
||||
// form an upper/lower pair.
|
||||
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
|
||||
// are three distinct runes that case-fold to each other.
|
||||
// When we do add Unicode letters, we must not assume that upper/lower
|
||||
// are the only case-equivalent pairs.
|
||||
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
|
||||
// Or perhaps it would escape as "!!k", or perhaps as "(212A)".
|
||||
//
|
||||
// Second, it would be nice to allow Unicode marks as well as letters,
|
||||
// but marks include combining marks, and then we must deal not
|
||||
// only with case folding but also normalization: both U+00E9 ('é')
|
||||
// and U+0065 U+0301 ('e' followed by combining acute accent)
|
||||
// look the same on the page and are treated by some file systems
|
||||
// as the same path. If we do allow Unicode marks in paths, there
|
||||
// must be some kind of normalization to allow only one canonical
|
||||
// encoding of any character used in an import path.
|
||||
package module
|
||||
|
||||
// IMPORTANT NOTE
|
||||
@@ -24,22 +102,95 @@ import (
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/tools/internal/semver"
|
||||
"golang.org/x/mod/semver"
|
||||
errors "golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// A Version is defined by a module path and version pair.
|
||||
// A Version (for clients, a module.Version) is defined by a module path and version pair.
|
||||
// These are stored in their plain (unescaped) form.
|
||||
type Version struct {
|
||||
// Path is a module path, like "golang.org/x/text" or "rsc.io/quote/v2".
|
||||
Path string
|
||||
|
||||
// Version is usually a semantic version in canonical form.
|
||||
// There are two exceptions to this general rule.
|
||||
// There are three exceptions to this general rule.
|
||||
// First, the top-level target of a build has no specific version
|
||||
// and uses Version = "".
|
||||
// Second, during MVS calculations the version "none" is used
|
||||
// to represent the decision to take no version of a given module.
|
||||
// Third, filesystem paths found in "replace" directives are
|
||||
// represented by a path with an empty version.
|
||||
Version string `json:",omitempty"`
|
||||
}
|
||||
|
||||
// String returns a representation of the Version suitable for logging
|
||||
// (Path@Version, or just Path if Version is empty).
|
||||
func (m Version) String() string {
|
||||
if m.Version == "" {
|
||||
return m.Path
|
||||
}
|
||||
return m.Path + "@" + m.Version
|
||||
}
|
||||
|
||||
// A ModuleError indicates an error specific to a module.
|
||||
type ModuleError struct {
|
||||
Path string
|
||||
Version string
|
||||
Err error
|
||||
}
|
||||
|
||||
// VersionError returns a ModuleError derived from a Version and error,
|
||||
// or err itself if it is already such an error.
|
||||
func VersionError(v Version, err error) error {
|
||||
var mErr *ModuleError
|
||||
if errors.As(err, &mErr) && mErr.Path == v.Path && mErr.Version == v.Version {
|
||||
return err
|
||||
}
|
||||
return &ModuleError{
|
||||
Path: v.Path,
|
||||
Version: v.Version,
|
||||
Err: err,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *ModuleError) Error() string {
|
||||
if v, ok := e.Err.(*InvalidVersionError); ok {
|
||||
return fmt.Sprintf("%s@%s: invalid %s: %v", e.Path, v.Version, v.noun(), v.Err)
|
||||
}
|
||||
if e.Version != "" {
|
||||
return fmt.Sprintf("%s@%s: %v", e.Path, e.Version, e.Err)
|
||||
}
|
||||
return fmt.Sprintf("module %s: %v", e.Path, e.Err)
|
||||
}
|
||||
|
||||
func (e *ModuleError) Unwrap() error { return e.Err }
|
||||
|
||||
// An InvalidVersionError indicates an error specific to a version, with the
|
||||
// module path unknown or specified externally.
|
||||
//
|
||||
// A ModuleError may wrap an InvalidVersionError, but an InvalidVersionError
|
||||
// must not wrap a ModuleError.
|
||||
type InvalidVersionError struct {
|
||||
Version string
|
||||
Pseudo bool
|
||||
Err error
|
||||
}
|
||||
|
||||
// noun returns either "version" or "pseudo-version", depending on whether
|
||||
// e.Version is a pseudo-version.
|
||||
func (e *InvalidVersionError) noun() string {
|
||||
if e.Pseudo {
|
||||
return "pseudo-version"
|
||||
}
|
||||
return "version"
|
||||
}
|
||||
|
||||
func (e *InvalidVersionError) Error() string {
|
||||
return fmt.Sprintf("%s %q invalid: %s", e.noun(), e.Version, e.Err)
|
||||
}
|
||||
|
||||
func (e *InvalidVersionError) Unwrap() error { return e.Err }
|
||||
|
||||
// Check checks that a given module path, version pair is valid.
|
||||
// In addition to the path being a valid module path
|
||||
// and the version being a valid semantic version,
|
||||
@@ -51,17 +202,14 @@ func Check(path, version string) error {
|
||||
return err
|
||||
}
|
||||
if !semver.IsValid(version) {
|
||||
return fmt.Errorf("malformed semantic version %v", version)
|
||||
return &ModuleError{
|
||||
Path: path,
|
||||
Err: &InvalidVersionError{Version: version, Err: errors.New("not a semantic version")},
|
||||
}
|
||||
}
|
||||
_, pathMajor, _ := SplitPathVersion(path)
|
||||
if !MatchPathMajor(version, pathMajor) {
|
||||
if pathMajor == "" {
|
||||
pathMajor = "v0 or v1"
|
||||
}
|
||||
if pathMajor[0] == '.' { // .v1
|
||||
pathMajor = pathMajor[1:]
|
||||
}
|
||||
return fmt.Errorf("mismatched module path %v and version %v (want %v)", path, version, pathMajor)
|
||||
if err := CheckPathMajor(version, pathMajor); err != nil {
|
||||
return &ModuleError{Path: path, Err: err}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -79,7 +227,7 @@ func firstPathOK(r rune) bool {
|
||||
// Paths can be ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
|
||||
// This matches what "go get" has historically recognized in import paths.
|
||||
// TODO(rsc): We would like to allow Unicode letters, but that requires additional
|
||||
// care in the safe encoding (see note below).
|
||||
// care in the safe encoding (see "escaped paths" above).
|
||||
func pathOK(r rune) bool {
|
||||
if r < utf8.RuneSelf {
|
||||
return r == '+' || r == '-' || r == '.' || r == '_' || r == '~' ||
|
||||
@@ -94,7 +242,7 @@ func pathOK(r rune) bool {
|
||||
// For now we allow all Unicode letters but otherwise limit to pathOK plus a few more punctuation characters.
|
||||
// If we expand the set of allowed characters here, we have to
|
||||
// work harder at detecting potential case-folding and normalization collisions.
|
||||
// See note about "safe encoding" below.
|
||||
// See note about "escaped paths" above.
|
||||
func fileNameOK(r rune) bool {
|
||||
if r < utf8.RuneSelf {
|
||||
// Entire set of ASCII punctuation, from which we remove characters:
|
||||
@@ -120,6 +268,17 @@ func fileNameOK(r rune) bool {
|
||||
}
|
||||
|
||||
// CheckPath checks that a module path is valid.
|
||||
// A valid module path is a valid import path, as checked by CheckImportPath,
|
||||
// with two additional constraints.
|
||||
// First, the leading path element (up to the first slash, if any),
|
||||
// by convention a domain name, must contain only lower-case ASCII letters,
|
||||
// ASCII digits, dots (U+002E), and dashes (U+002D);
|
||||
// it must contain at least one dot and cannot start with a dash.
|
||||
// Second, for a final path element of the form /vN, where N looks numeric
|
||||
// (ASCII digits and dots) must not begin with a leading zero, must not be /v1,
|
||||
// and must not contain any dots. For paths beginning with "gopkg.in/",
|
||||
// this second requirement is replaced by a requirement that the path
|
||||
// follow the gopkg.in server's conventions.
|
||||
func CheckPath(path string) error {
|
||||
if err := checkPath(path, false); err != nil {
|
||||
return fmt.Errorf("malformed module path %q: %v", path, err)
|
||||
@@ -149,6 +308,20 @@ func CheckPath(path string) error {
|
||||
}
|
||||
|
||||
// CheckImportPath checks that an import path is valid.
|
||||
//
|
||||
// A valid import path consists of one or more valid path elements
|
||||
// separated by slashes (U+002F). (It must not begin with nor end in a slash.)
|
||||
//
|
||||
// A valid path element is a non-empty string made up of
|
||||
// ASCII letters, ASCII digits, and limited ASCII punctuation: + - . _ and ~.
|
||||
// It must not begin or end with a dot (U+002E), nor contain two dots in a row.
|
||||
//
|
||||
// The element prefix up to the first dot must not be a reserved file name
|
||||
// on Windows, regardless of case (CON, com1, NuL, and so on).
|
||||
//
|
||||
// CheckImportPath may be less restrictive in the future, but see the
|
||||
// top-level package documentation for additional information about
|
||||
// subtleties of Unicode.
|
||||
func CheckImportPath(path string) error {
|
||||
if err := checkPath(path, false); err != nil {
|
||||
return fmt.Errorf("malformed import path %q: %v", path, err)
|
||||
@@ -169,8 +342,8 @@ func checkPath(path string, fileName bool) error {
|
||||
if path == "" {
|
||||
return fmt.Errorf("empty string")
|
||||
}
|
||||
if strings.Contains(path, "..") {
|
||||
return fmt.Errorf("double dot")
|
||||
if path[0] == '-' {
|
||||
return fmt.Errorf("leading dash")
|
||||
}
|
||||
if strings.Contains(path, "//") {
|
||||
return fmt.Errorf("double slash")
|
||||
@@ -226,13 +399,24 @@ func checkElem(elem string, fileName bool) error {
|
||||
}
|
||||
for _, bad := range badWindowsNames {
|
||||
if strings.EqualFold(bad, short) {
|
||||
return fmt.Errorf("disallowed path element %q", elem)
|
||||
return fmt.Errorf("%q disallowed as path element component on Windows", short)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckFilePath checks whether a slash-separated file path is valid.
|
||||
// CheckFilePath checks that a slash-separated file path is valid.
|
||||
// The definition of a valid file path is the same as the definition
|
||||
// of a valid import path except that the set of allowed characters is larger:
|
||||
// all Unicode letters, ASCII digits, the ASCII space character (U+0020),
|
||||
// and the ASCII punctuation characters
|
||||
// “!#$%&()+,-.=@[]^_{}~”.
|
||||
// (The excluded punctuation characters, " * < > ? ` ' | / \ and :,
|
||||
// have special meanings in certain shells or operating systems.)
|
||||
//
|
||||
// CheckFilePath may be less restrictive in the future, but see the
|
||||
// top-level package documentation for additional information about
|
||||
// subtleties of Unicode.
|
||||
func CheckFilePath(path string) error {
|
||||
if err := checkPath(path, true); err != nil {
|
||||
return fmt.Errorf("malformed file path %q: %v", path, err)
|
||||
@@ -271,6 +455,9 @@ var badWindowsNames = []string{
|
||||
// and version is either empty or "/vN" for N >= 2.
|
||||
// As a special case, gopkg.in paths are recognized directly;
|
||||
// they require ".vN" instead of "/vN", and for all N, not just N >= 2.
|
||||
// SplitPathVersion returns with ok = false when presented with
|
||||
// a path whose last path element does not satisfy the constraints
|
||||
// applied by CheckPath, such as "example.com/pkg/v1" or "example.com/pkg/v1.2".
|
||||
func SplitPathVersion(path string) (prefix, pathMajor string, ok bool) {
|
||||
if strings.HasPrefix(path, "gopkg.in/") {
|
||||
return splitGopkgIn(path)
|
||||
@@ -319,20 +506,65 @@ func splitGopkgIn(path string) (prefix, pathMajor string, ok bool) {
|
||||
|
||||
// MatchPathMajor reports whether the semantic version v
|
||||
// matches the path major version pathMajor.
|
||||
//
|
||||
// MatchPathMajor returns true if and only if CheckPathMajor returns nil.
|
||||
func MatchPathMajor(v, pathMajor string) bool {
|
||||
return CheckPathMajor(v, pathMajor) == nil
|
||||
}
|
||||
|
||||
// CheckPathMajor returns a non-nil error if the semantic version v
|
||||
// does not match the path major version pathMajor.
|
||||
func CheckPathMajor(v, pathMajor string) error {
|
||||
// TODO(jayconrod): return errors or panic for invalid inputs. This function
|
||||
// (and others) was covered by integration tests for cmd/go, and surrounding
|
||||
// code protected against invalid inputs like non-canonical versions.
|
||||
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
|
||||
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
|
||||
}
|
||||
if strings.HasPrefix(v, "v0.0.0-") && pathMajor == ".v1" {
|
||||
// Allow old bug in pseudo-versions that generated v0.0.0- pseudoversion for gopkg .v1.
|
||||
// For example, gopkg.in/yaml.v2@v2.2.1's go.mod requires gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405.
|
||||
return true
|
||||
return nil
|
||||
}
|
||||
m := semver.Major(v)
|
||||
if pathMajor == "" {
|
||||
return m == "v0" || m == "v1" || semver.Build(v) == "+incompatible"
|
||||
if m == "v0" || m == "v1" || semver.Build(v) == "+incompatible" {
|
||||
return nil
|
||||
}
|
||||
pathMajor = "v0 or v1"
|
||||
} else if pathMajor[0] == '/' || pathMajor[0] == '.' {
|
||||
if m == pathMajor[1:] {
|
||||
return nil
|
||||
}
|
||||
pathMajor = pathMajor[1:]
|
||||
}
|
||||
return (pathMajor[0] == '/' || pathMajor[0] == '.') && m == pathMajor[1:]
|
||||
return &InvalidVersionError{
|
||||
Version: v,
|
||||
Err: fmt.Errorf("should be %s, not %s", pathMajor, semver.Major(v)),
|
||||
}
|
||||
}
|
||||
|
||||
// PathMajorPrefix returns the major-version tag prefix implied by pathMajor.
|
||||
// An empty PathMajorPrefix allows either v0 or v1.
|
||||
//
|
||||
// Note that MatchPathMajor may accept some versions that do not actually begin
|
||||
// with this prefix: namely, it accepts a 'v0.0.0-' prefix for a '.v1'
|
||||
// pathMajor, even though that pathMajor implies 'v1' tagging.
|
||||
func PathMajorPrefix(pathMajor string) string {
|
||||
if pathMajor == "" {
|
||||
return ""
|
||||
}
|
||||
if pathMajor[0] != '/' && pathMajor[0] != '.' {
|
||||
panic("pathMajor suffix " + pathMajor + " passed to PathMajorPrefix lacks separator")
|
||||
}
|
||||
if strings.HasPrefix(pathMajor, ".v") && strings.HasSuffix(pathMajor, "-unstable") {
|
||||
pathMajor = strings.TrimSuffix(pathMajor, "-unstable")
|
||||
}
|
||||
m := pathMajor[1:]
|
||||
if m != semver.Major(m) {
|
||||
panic("pathMajor suffix " + pathMajor + "passed to PathMajorPrefix is not a valid major version")
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
// CanonicalVersion returns the canonical form of the version string v.
|
||||
@@ -345,7 +577,10 @@ func CanonicalVersion(v string) string {
|
||||
return cv
|
||||
}
|
||||
|
||||
// Sort sorts the list by Path, breaking ties by comparing Versions.
|
||||
// Sort sorts the list by Path, breaking ties by comparing Version fields.
|
||||
// The Version fields are interpreted as semantic versions (using semver.Compare)
|
||||
// optionally followed by a tie-breaking suffix introduced by a slash character,
|
||||
// like in "v0.0.1/go.mod".
|
||||
func Sort(list []Version) {
|
||||
sort.Slice(list, func(i, j int) bool {
|
||||
mi := list[i]
|
||||
@@ -372,93 +607,36 @@ func Sort(list []Version) {
|
||||
})
|
||||
}
|
||||
|
||||
// Safe encodings
|
||||
//
|
||||
// Module paths appear as substrings of file system paths
|
||||
// (in the download cache) and of web server URLs in the proxy protocol.
|
||||
// In general we cannot rely on file systems to be case-sensitive,
|
||||
// nor can we rely on web servers, since they read from file systems.
|
||||
// That is, we cannot rely on the file system to keep rsc.io/QUOTE
|
||||
// and rsc.io/quote separate. Windows and macOS don't.
|
||||
// Instead, we must never require two different casings of a file path.
|
||||
// Because we want the download cache to match the proxy protocol,
|
||||
// and because we want the proxy protocol to be possible to serve
|
||||
// from a tree of static files (which might be stored on a case-insensitive
|
||||
// file system), the proxy protocol must never require two different casings
|
||||
// of a URL path either.
|
||||
//
|
||||
// One possibility would be to make the safe encoding be the lowercase
|
||||
// hexadecimal encoding of the actual path bytes. This would avoid ever
|
||||
// needing different casings of a file path, but it would be fairly illegible
|
||||
// to most programmers when those paths appeared in the file system
|
||||
// (including in file paths in compiler errors and stack traces)
|
||||
// in web server logs, and so on. Instead, we want a safe encoding that
|
||||
// leaves most paths unaltered.
|
||||
//
|
||||
// The safe encoding is this:
|
||||
// replace every uppercase letter with an exclamation mark
|
||||
// followed by the letter's lowercase equivalent.
|
||||
//
|
||||
// For example,
|
||||
// github.com/Azure/azure-sdk-for-go -> github.com/!azure/azure-sdk-for-go.
|
||||
// github.com/GoogleCloudPlatform/cloudsql-proxy -> github.com/!google!cloud!platform/cloudsql-proxy
|
||||
// github.com/Sirupsen/logrus -> github.com/!sirupsen/logrus.
|
||||
//
|
||||
// Import paths that avoid upper-case letters are left unchanged.
|
||||
// Note that because import paths are ASCII-only and avoid various
|
||||
// problematic punctuation (like : < and >), the safe encoding is also ASCII-only
|
||||
// and avoids the same problematic punctuation.
|
||||
//
|
||||
// Import paths have never allowed exclamation marks, so there is no
|
||||
// need to define how to encode a literal !.
|
||||
//
|
||||
// Although paths are disallowed from using Unicode (see pathOK above),
|
||||
// the eventual plan is to allow Unicode letters as well, to assume that
|
||||
// file systems and URLs are Unicode-safe (storing UTF-8), and apply
|
||||
// the !-for-uppercase convention. Note however that not all runes that
|
||||
// are different but case-fold equivalent are an upper/lower pair.
|
||||
// For example, U+004B ('K'), U+006B ('k'), and U+212A ('K' for Kelvin)
|
||||
// are considered to case-fold to each other. When we do add Unicode
|
||||
// letters, we must not assume that upper/lower are the only case-equivalent pairs.
|
||||
// Perhaps the Kelvin symbol would be disallowed entirely, for example.
|
||||
// Or perhaps it would encode as "!!k", or perhaps as "(212A)".
|
||||
//
|
||||
// Also, it would be nice to allow Unicode marks as well as letters,
|
||||
// but marks include combining marks, and then we must deal not
|
||||
// only with case folding but also normalization: both U+00E9 ('é')
|
||||
// and U+0065 U+0301 ('e' followed by combining acute accent)
|
||||
// look the same on the page and are treated by some file systems
|
||||
// as the same path. If we do allow Unicode marks in paths, there
|
||||
// must be some kind of normalization to allow only one canonical
|
||||
// encoding of any character used in an import path.
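A minimal sketch of the !-for-uppercase rule described above (not the vendored escapeString itself, which additionally validates its input):

	func escapeUpper(s string) string {
		buf := make([]byte, 0, len(s))
		for i := 0; i < len(s); i++ {
			if c := s[i]; 'A' <= c && c <= 'Z' {
				buf = append(buf, '!', c+'a'-'A') // "Azure" -> "!azure"
			} else {
				buf = append(buf, c)
			}
		}
		return string(buf)
	}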
|
||||
|
||||
// EncodePath returns the safe encoding of the given module path.
|
||||
// EscapePath returns the escaped form of the given module path.
|
||||
// It fails if the module path is invalid.
|
||||
func EncodePath(path string) (encoding string, err error) {
|
||||
func EscapePath(path string) (escaped string, err error) {
|
||||
if err := CheckPath(path); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return encodeString(path)
|
||||
return escapeString(path)
|
||||
}
|
||||
|
||||
// EncodeVersion returns the safe encoding of the given module version.
|
||||
// EscapeVersion returns the escaped form of the given module version.
|
||||
// Versions are allowed to be in non-semver form but must be valid file names
|
||||
// and not contain exclamation marks.
|
||||
func EncodeVersion(v string) (encoding string, err error) {
|
||||
func EscapeVersion(v string) (escaped string, err error) {
|
||||
if err := checkElem(v, true); err != nil || strings.Contains(v, "!") {
|
||||
return "", fmt.Errorf("disallowed version string %q", v)
|
||||
return "", &InvalidVersionError{
|
||||
Version: v,
|
||||
Err: fmt.Errorf("disallowed version string"),
|
||||
}
|
||||
}
|
||||
return encodeString(v)
|
||||
return escapeString(v)
|
||||
}
|
||||
|
||||
func encodeString(s string) (encoding string, err error) {
|
||||
func escapeString(s string) (escaped string, err error) {
|
||||
haveUpper := false
|
||||
for _, r := range s {
|
||||
if r == '!' || r >= utf8.RuneSelf {
|
||||
// This should be disallowed by CheckPath, but diagnose anyway.
|
||||
// The correctness of the encoding loop below depends on it.
|
||||
return "", fmt.Errorf("internal error: inconsistency in EncodePath")
|
||||
// The correctness of the escaping loop below depends on it.
|
||||
return "", fmt.Errorf("internal error: inconsistency in EscapePath")
|
||||
}
|
||||
if 'A' <= r && r <= 'Z' {
|
||||
haveUpper = true
|
||||
@@ -480,39 +658,39 @@ func encodeString(s string) (encoding string, err error) {
|
||||
return string(buf), nil
|
||||
}
|
||||
|
||||
// DecodePath returns the module path of the given safe encoding.
|
||||
// It fails if the encoding is invalid or encodes an invalid path.
|
||||
func DecodePath(encoding string) (path string, err error) {
|
||||
path, ok := decodeString(encoding)
|
||||
// UnescapePath returns the module path for the given escaped path.
|
||||
// It fails if the escaped path is invalid or describes an invalid path.
|
||||
func UnescapePath(escaped string) (path string, err error) {
|
||||
path, ok := unescapeString(escaped)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("invalid module path encoding %q", encoding)
|
||||
return "", fmt.Errorf("invalid escaped module path %q", escaped)
|
||||
}
|
||||
if err := CheckPath(path); err != nil {
|
||||
return "", fmt.Errorf("invalid module path encoding %q: %v", encoding, err)
|
||||
return "", fmt.Errorf("invalid escaped module path %q: %v", escaped, err)
|
||||
}
|
||||
return path, nil
|
||||
}
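A round-trip example using the renamed functions above (module path chosen for illustration; assumes imports of golang.org/x/mod/module and log):

	esc, err := module.EscapePath("github.com/Azure/azure-sdk-for-go")
	if err != nil {
		log.Fatal(err)
	}
	// esc == "github.com/!azure/azure-sdk-for-go"
	path, err := module.UnescapePath(esc)
	if err != nil {
		log.Fatal(err)
	}
	// path == "github.com/Azure/azure-sdk-for-go"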
|
||||
|
||||
// DecodeVersion returns the version string for the given safe encoding.
|
||||
// It fails if the encoding is invalid or encodes an invalid version.
|
||||
// UnescapeVersion returns the version string for the given escaped version.
|
||||
// It fails if the escaped form is invalid or describes an invalid version.
|
||||
// Versions are allowed to be in non-semver form but must be valid file names
|
||||
// and not contain exclamation marks.
|
||||
func DecodeVersion(encoding string) (v string, err error) {
|
||||
v, ok := decodeString(encoding)
|
||||
func UnescapeVersion(escaped string) (v string, err error) {
|
||||
v, ok := unescapeString(escaped)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("invalid version encoding %q", encoding)
|
||||
return "", fmt.Errorf("invalid escaped version %q", escaped)
|
||||
}
|
||||
if err := checkElem(v, true); err != nil {
|
||||
return "", fmt.Errorf("disallowed version string %q", v)
|
||||
return "", fmt.Errorf("invalid escaped version %q: %v", v, err)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
func decodeString(encoding string) (string, bool) {
|
||||
func unescapeString(escaped string) (string, bool) {
|
||||
var buf []byte
|
||||
|
||||
bang := false
|
||||
for _, r := range encoding {
|
||||
for _, r := range escaped {
|
||||
if r >= utf8.RuneSelf {
|
||||
return "", false
|
||||
}
|
23
vendor/golang.org/x/mod/semver/BUILD
generated
vendored
Normal file
@@ -0,0 +1,23 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["semver.go"],
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/mod/semver",
|
||||
importpath = "golang.org/x/mod/semver",
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
@@ -107,7 +107,7 @@ func Build(v string) string {
|
||||
}
|
||||
|
||||
// Compare returns an integer comparing two versions according to
|
||||
// according to semantic version precedence.
|
||||
// semantic version precedence.
|
||||
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
|
||||
//
|
||||
// An invalid semantic version string is considered less than a valid one.
|
||||
@@ -263,7 +263,7 @@ func parseBuild(v string) (t, rest string, ok bool) {
|
||||
i := 1
|
||||
start := 1
|
||||
for i < len(v) {
|
||||
if !isIdentChar(v[i]) {
|
||||
if !isIdentChar(v[i]) && v[i] != '.' {
|
||||
return
|
||||
}
|
||||
if v[i] == '.' {
|
8
vendor/golang.org/x/net/http2/client_conn_pool.go
generated
vendored
@@ -107,6 +107,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
|
||||
|
||||
// dialCall is an in-flight Transport dial call to a host.
|
||||
type dialCall struct {
|
||||
_ incomparable
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
res *ClientConn // valid after done is closed
|
||||
@@ -180,6 +181,7 @@ func (p *clientConnPool) addConnIfNeeded(key string, t *Transport, c *tls.Conn)
|
||||
}
|
||||
|
||||
type addConnCall struct {
|
||||
_ incomparable
|
||||
p *clientConnPool
|
||||
done chan struct{} // closed when done
|
||||
err error
|
||||
@@ -200,12 +202,6 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
|
||||
close(c.done)
|
||||
}
|
||||
|
||||
func (p *clientConnPool) addConn(key string, cc *ClientConn) {
|
||||
p.mu.Lock()
|
||||
p.addConnLocked(key, cc)
|
||||
p.mu.Unlock()
|
||||
}
|
||||
|
||||
// p.mu must be held
|
||||
func (p *clientConnPool) addConnLocked(key string, cc *ClientConn) {
|
||||
for _, v := range p.conns[key] {
|
||||
|
2
vendor/golang.org/x/net/http2/flow.go
generated
vendored
@@ -8,6 +8,8 @@ package http2
|
||||
|
||||
// flow is the flow control window's size.
|
||||
type flow struct {
|
||||
_ incomparable
|
||||
|
||||
// n is the number of DATA bytes we're allowed to send.
|
||||
// A flow is kept both on a conn and a per-stream.
|
||||
n int32
|
||||
|
7
vendor/golang.org/x/net/http2/hpack/huffman.go
generated
vendored
@@ -105,7 +105,14 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// incomparable is a zero-width, non-comparable type. Adding it to a struct
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
type incomparable [0]func()
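The effect of the zero-width field, as a hedged sketch (the guarded type is invented for illustration):

	type guarded struct {
		_ incomparable // zero size when placed first
		n int
	}

	// var a, b guarded
	// _ = a == b // compile error: func values are not comparable, so neither is guarded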
|
||||
|
||||
type node struct {
|
||||
_ incomparable
|
||||
|
||||
// children is non-nil for internal nodes
|
||||
children *[256]*node
|
||||
|
||||
|
7
vendor/golang.org/x/net/http2/http2.go
generated
vendored
@@ -241,6 +241,7 @@ func (cw closeWaiter) Wait() {
|
||||
// Its buffered writer is lazily allocated as needed, to minimize
|
||||
// idle memory usage with many connections.
|
||||
type bufferedWriter struct {
|
||||
_ incomparable
|
||||
w io.Writer // immutable
|
||||
bw *bufio.Writer // non-nil when data is buffered
|
||||
}
|
||||
@@ -313,6 +314,7 @@ func bodyAllowedForStatus(status int) bool {
|
||||
}
|
||||
|
||||
type httpError struct {
|
||||
_ incomparable
|
||||
msg string
|
||||
timeout bool
|
||||
}
|
||||
@@ -376,3 +378,8 @@ func (s *sorter) SortStrings(ss []string) {
|
||||
func validPseudoPath(v string) bool {
|
||||
return (len(v) > 0 && v[0] == '/') || v == "*"
|
||||
}
|
||||
|
||||
// incomparable is a zero-width, non-comparable type. Adding it to a struct
|
||||
// makes that struct also non-comparable, and generally doesn't add
|
||||
// any size (as long as it's first).
|
||||
type incomparable [0]func()
|
||||
|
8
vendor/golang.org/x/net/http2/server.go
generated
vendored
@@ -761,6 +761,7 @@ func (sc *serverConn) readFrames() {
|
||||
|
||||
// frameWriteResult is the message passed from writeFrameAsync to the serve goroutine.
|
||||
type frameWriteResult struct {
|
||||
_ incomparable
|
||||
wr FrameWriteRequest // what was written (or attempted)
|
||||
err error // result of the writeFrame call
|
||||
}
|
||||
@@ -771,7 +772,7 @@ type frameWriteResult struct {
|
||||
// serverConn.
|
||||
func (sc *serverConn) writeFrameAsync(wr FrameWriteRequest) {
|
||||
err := wr.write.writeFrame(sc)
|
||||
sc.wroteFrameCh <- frameWriteResult{wr, err}
|
||||
sc.wroteFrameCh <- frameWriteResult{wr: wr, err: err}
|
||||
}
|
||||
|
||||
func (sc *serverConn) closeAllStreamsOnConnClose() {
|
||||
@@ -1161,7 +1162,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) {
|
||||
if wr.write.staysWithinBuffer(sc.bw.Available()) {
|
||||
sc.writingFrameAsync = false
|
||||
err := wr.write.writeFrame(sc)
|
||||
sc.wroteFrame(frameWriteResult{wr, err})
|
||||
sc.wroteFrame(frameWriteResult{wr: wr, err: err})
|
||||
} else {
|
||||
sc.writingFrameAsync = true
|
||||
go sc.writeFrameAsync(wr)
|
||||
@@ -2057,7 +2058,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
|
||||
var trailer http.Header
|
||||
for _, v := range rp.header["Trailer"] {
|
||||
for _, key := range strings.Split(v, ",") {
|
||||
key = http.CanonicalHeaderKey(strings.TrimSpace(key))
|
||||
key = http.CanonicalHeaderKey(textproto.TrimString(key))
|
||||
switch key {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
// Bogus. (copy of http1 rules)
|
||||
@@ -2275,6 +2276,7 @@ func (sc *serverConn) sendWindowUpdate32(st *stream, n int32) {
|
||||
// requestBody is the Handler's Request.Body type.
|
||||
// Read and Close may be called concurrently.
|
||||
type requestBody struct {
|
||||
_ incomparable
|
||||
stream *stream
|
||||
conn *serverConn
|
||||
closed bool // for use by Close only
|
||||
|
78
vendor/golang.org/x/net/http2/transport.go
generated
vendored
@@ -108,6 +108,19 @@ type Transport struct {
|
||||
// waiting for their turn.
|
||||
StrictMaxConcurrentStreams bool
|
||||
|
||||
// ReadIdleTimeout is the timeout after which a health check using ping
|
||||
// frame will be carried out if no frame is received on the connection.
|
||||
// Note that a ping response is considered a received frame, so if
|
||||
// there is no other traffic on the connection, the health check will
|
||||
// be performed every ReadIdleTimeout interval.
|
||||
// If zero, no health check is performed.
|
||||
ReadIdleTimeout time.Duration
|
||||
|
||||
// PingTimeout is the timeout after which the connection will be closed
|
||||
// if a response to Ping is not received.
|
||||
// Defaults to 15s.
|
||||
PingTimeout time.Duration
|
||||
|
||||
// t1, if non-nil, is the standard library Transport using
|
||||
// this transport. Its settings are used (but not its
|
||||
// RoundTrip method, etc).
|
||||
@@ -131,6 +144,14 @@ func (t *Transport) disableCompression() bool {
|
||||
return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression)
|
||||
}
|
||||
|
||||
func (t *Transport) pingTimeout() time.Duration {
|
||||
if t.PingTimeout == 0 {
|
||||
return 15 * time.Second
|
||||
}
|
||||
return t.PingTimeout
|
||||
|
||||
}
|
||||
|
||||
// ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2.
|
||||
// It returns an error if t1 has already been HTTP/2-enabled.
|
||||
func ConfigureTransport(t1 *http.Transport) error {
|
||||
@@ -675,6 +696,20 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
|
||||
return cc, nil
|
||||
}
|
||||
|
||||
func (cc *ClientConn) healthCheck() {
|
||||
pingTimeout := cc.t.pingTimeout()
|
||||
// We don't need to periodically ping in the health check, because the readLoop of ClientConn will
|
||||
// trigger the healthCheck again if there is no frame received.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), pingTimeout)
|
||||
defer cancel()
|
||||
err := cc.Ping(ctx)
|
||||
if err != nil {
|
||||
cc.closeForLostPing()
|
||||
cc.t.connPool().MarkDead(cc)
|
||||
return
|
||||
}
|
||||
}
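A caller-side sketch of the new health-check knobs (field names come from the diff above; the timeout values are arbitrary, and imports of golang.org/x/net/http2 and time are assumed):

	t2 := &http2.Transport{
		ReadIdleTimeout: 30 * time.Second, // ping when no frame is read for 30s
		PingTimeout:     10 * time.Second, // close the connection if the ping goes unanswered
	}
	_ = t2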
|
||||
|
||||
func (cc *ClientConn) setGoAway(f *GoAwayFrame) {
|
||||
cc.mu.Lock()
|
||||
defer cc.mu.Unlock()
|
||||
@@ -846,14 +881,12 @@ func (cc *ClientConn) sendGoAway() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Close closes the client connection immediately.
|
||||
//
|
||||
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
|
||||
func (cc *ClientConn) Close() error {
|
||||
// closes the client connection immediately. In-flight requests are interrupted.
|
||||
// err is sent to streams.
|
||||
func (cc *ClientConn) closeForError(err error) error {
|
||||
cc.mu.Lock()
|
||||
defer cc.cond.Broadcast()
|
||||
defer cc.mu.Unlock()
|
||||
err := errors.New("http2: client connection force closed via ClientConn.Close")
|
||||
for id, cs := range cc.streams {
|
||||
select {
|
||||
case cs.resc <- resAndError{err: err}:
|
||||
@@ -866,6 +899,20 @@ func (cc *ClientConn) Close() error {
|
||||
return cc.tconn.Close()
|
||||
}
|
||||
|
||||
// Close closes the client connection immediately.
|
||||
//
|
||||
// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead.
|
||||
func (cc *ClientConn) Close() error {
|
||||
err := errors.New("http2: client connection force closed via ClientConn.Close")
|
||||
return cc.closeForError(err)
|
||||
}
|
||||
|
||||
// closes the client connection immediately. In-flight requests are interrupted.
|
||||
func (cc *ClientConn) closeForLostPing() error {
|
||||
err := errors.New("http2: client connection lost")
|
||||
return cc.closeForError(err)
|
||||
}
|
||||
|
||||
const maxAllocFrameSize = 512 << 10
|
||||
|
||||
// frameBuffer returns a scratch buffer suitable for writing DATA frames.
|
||||
@@ -916,7 +963,7 @@ func commaSeparatedTrailers(req *http.Request) (string, error) {
|
||||
k = http.CanonicalHeaderKey(k)
|
||||
switch k {
|
||||
case "Transfer-Encoding", "Trailer", "Content-Length":
|
||||
return "", &badStringError{"invalid Trailer key", k}
|
||||
return "", fmt.Errorf("invalid Trailer key %q", k)
|
||||
}
|
||||
keys = append(keys, k)
|
||||
}
|
||||
@@ -1394,13 +1441,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
|
||||
}
|
||||
}
|
||||
|
||||
type badStringError struct {
|
||||
what string
|
||||
str string
|
||||
}
|
||||
|
||||
func (e *badStringError) Error() string { return fmt.Sprintf("%s %q", e.what, e.str) }
|
||||
|
||||
// requires cc.mu be held.
|
||||
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
|
||||
cc.hbuf.Reset()
|
||||
@@ -1616,6 +1656,7 @@ func (cc *ClientConn) writeHeader(name, value string) {
|
||||
}
|
||||
|
||||
type resAndError struct {
|
||||
_ incomparable
|
||||
res *http.Response
|
||||
err error
|
||||
}
|
||||
@@ -1663,6 +1704,7 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream {
|
||||
|
||||
// clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop.
|
||||
type clientConnReadLoop struct {
|
||||
_ incomparable
|
||||
cc *ClientConn
|
||||
closeWhenIdle bool
|
||||
}
|
||||
@@ -1742,8 +1784,17 @@ func (rl *clientConnReadLoop) run() error {
|
||||
rl.closeWhenIdle = cc.t.disableKeepAlives() || cc.singleUse
|
||||
gotReply := false // ever saw a HEADERS reply
|
||||
gotSettings := false
|
||||
readIdleTimeout := cc.t.ReadIdleTimeout
|
||||
var t *time.Timer
|
||||
if readIdleTimeout != 0 {
|
||||
t = time.AfterFunc(readIdleTimeout, cc.healthCheck)
|
||||
defer t.Stop()
|
||||
}
|
||||
for {
|
||||
f, err := cc.fr.ReadFrame()
|
||||
if t != nil {
|
||||
t.Reset(readIdleTimeout)
|
||||
}
|
||||
if err != nil {
|
||||
cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err)
|
||||
}
|
||||
@@ -2479,6 +2530,7 @@ func (rt erringRoundTripper) RoundTrip(*http.Request) (*http.Response, error) {
|
||||
// gzipReader wraps a response body so it can lazily
|
||||
// call gzip.NewReader on the first call to Read
|
||||
type gzipReader struct {
|
||||
_ incomparable
|
||||
body io.ReadCloser // underlying Response.Body
|
||||
zr *gzip.Reader // lazily-initialized gzip reader
|
||||
zerr error // sticky error
|
||||
|
5
vendor/golang.org/x/net/ipv4/header.go
generated
vendored
@@ -14,9 +14,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
Version = 4 // protocol version
|
||||
HeaderLen = 20 // header length without extension headers
|
||||
maxHeaderLen = 60 // sensible default, revisit if later RFCs define new usage of version and header length fields
|
||||
Version = 4 // protocol version
|
||||
HeaderLen = 20 // header length without extension headers
|
||||
)
|
||||
|
||||
type HeaderFlags int
|
||||
|
5
vendor/golang.org/x/tools/go/ast/astutil/imports.go
generated
vendored
@@ -275,9 +275,10 @@ func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (del
|
||||
|
||||
// We deleted an entry but now there may be
|
||||
// a blank line-sized hole where the import was.
|
||||
if line-lastLine > 1 {
|
||||
if line-lastLine > 1 || !gen.Rparen.IsValid() {
|
||||
// There was a blank line immediately preceding the deleted import,
|
||||
// so there's no need to close the hole.
|
||||
// so there's no need to close the hole. The right parenthesis is
|
||||
// invalid after AddImport to an import statement without parenthesis.
|
||||
// Do nothing.
|
||||
} else if line != fset.File(gen.Rparen).LineCount() {
|
||||
// There was no blank line. Close the hole.
|
||||
|
8
vendor/golang.org/x/tools/go/internal/gcimporter/gcimporter.go
generated
vendored
@@ -344,7 +344,7 @@ func (p *parser) expectKeyword(keyword string) {
|
||||
|
||||
// PackageId = string_lit .
|
||||
//
|
||||
func (p *parser) parsePackageId() string {
|
||||
func (p *parser) parsePackageID() string {
|
||||
id, err := strconv.Unquote(p.expect(scanner.String))
|
||||
if err != nil {
|
||||
p.error(err)
|
||||
@@ -384,7 +384,7 @@ func (p *parser) parseDotIdent() string {
|
||||
//
|
||||
func (p *parser) parseQualifiedName() (id, name string) {
|
||||
p.expect('@')
|
||||
id = p.parsePackageId()
|
||||
id = p.parsePackageID()
|
||||
p.expect('.')
|
||||
// Per rev f280b8a485fd (10/2/2013), qualified names may be used for anonymous fields.
|
||||
if p.tok == '?' {
|
||||
@@ -696,7 +696,7 @@ func (p *parser) parseInterfaceType(parent *types.Package) types.Type {
|
||||
|
||||
// Complete requires the type's embedded interfaces to be fully defined,
|
||||
// but we do not define any
|
||||
return types.NewInterface(methods, nil).Complete()
|
||||
return newInterface(methods, nil).Complete()
|
||||
}
|
||||
|
||||
// ChanType = ( "chan" [ "<-" ] | "<-" "chan" ) Type .
|
||||
@@ -785,7 +785,7 @@ func (p *parser) parseType(parent *types.Package) types.Type {
|
||||
func (p *parser) parseImportDecl() {
|
||||
p.expectKeyword("import")
|
||||
name := p.parsePackageName()
|
||||
p.getPkg(p.parsePackageId(), name)
|
||||
p.getPkg(p.parsePackageID(), name)
|
||||
}
|
||||
|
||||
// int_lit = [ "+" | "-" ] { "0" ... "9" } .
|
||||
|
1
vendor/golang.org/x/tools/go/internal/packagesdriver/BUILD
generated
vendored
@@ -6,6 +6,7 @@ go_library(
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/internal/packagesdriver",
|
||||
importpath = "golang.org/x/tools/go/internal/packagesdriver",
|
||||
visibility = ["//vendor/golang.org/x/tools/go:__subpackages__"],
|
||||
deps = ["//vendor/golang.org/x/tools/internal/gocommand:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
|
109
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
generated
vendored
@@ -11,17 +11,15 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/types"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
)
|
||||
|
||||
var debug = false
|
||||
|
||||
// GetSizes returns the sizes used by the underlying driver with the given parameters.
|
||||
func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
|
||||
func GetSizes(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) {
|
||||
// TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
|
||||
const toolPrefix = "GOPACKAGESDRIVER="
|
||||
tool := ""
|
||||
@@ -41,7 +39,7 @@ func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExp
|
||||
}
|
||||
|
||||
if tool == "off" {
|
||||
return GetSizesGolist(ctx, buildFlags, env, dir, usesExportData)
|
||||
return GetSizesGolist(ctx, buildFlags, env, gocmdRunner, dir)
|
||||
}
|
||||
|
||||
req, err := json.Marshal(struct {
|
||||
@@ -77,98 +75,43 @@ func GetSizes(ctx context.Context, buildFlags, env []string, dir string, usesExp
|
||||
return response.Sizes, nil
|
||||
}
|
||||
|
||||
func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, usesExportData bool) (types.Sizes, error) {
|
||||
args := []string{"list", "-f", "{{context.GOARCH}} {{context.Compiler}}"}
|
||||
args = append(args, buildFlags...)
|
||||
args = append(args, "--", "unsafe")
|
||||
stdout, stderr, err := invokeGo(ctx, env, dir, usesExportData, args...)
|
||||
func GetSizesGolist(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) {
|
||||
inv := gocommand.Invocation{
|
||||
Verb: "list",
|
||||
Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"},
|
||||
Env: env,
|
||||
BuildFlags: buildFlags,
|
||||
WorkingDir: dir,
|
||||
}
|
||||
stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
|
||||
var goarch, compiler string
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "cannot find main module") {
|
||||
if rawErr != nil {
|
||||
if strings.Contains(rawErr.Error(), "cannot find main module") {
|
||||
// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
|
||||
// TODO(matloob): Is this a problem in practice?
|
||||
envout, _, enverr := invokeGo(ctx, env, dir, usesExportData, "env", "GOARCH")
|
||||
inv := gocommand.Invocation{
|
||||
Verb: "env",
|
||||
Args: []string{"GOARCH"},
|
||||
Env: env,
|
||||
WorkingDir: dir,
|
||||
}
|
||||
envout, enverr := gocmdRunner.Run(ctx, inv)
|
||||
if enverr != nil {
|
||||
return nil, err
|
||||
return nil, enverr
|
||||
}
|
||||
goarch = strings.TrimSpace(envout.String())
|
||||
compiler = "gc"
|
||||
} else {
|
||||
return nil, err
|
||||
return nil, friendlyErr
|
||||
}
|
||||
} else {
|
||||
fields := strings.Fields(stdout.String())
|
||||
if len(fields) < 2 {
|
||||
return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\" from stdout of go command:\n%s\ndir: %s\nstdout: <<%s>>\nstderr: <<%s>>",
|
||||
cmdDebugStr(env, args...), dir, stdout.String(), stderr.String())
|
||||
return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
|
||||
stdout.String(), stderr.String())
|
||||
}
|
||||
goarch = fields[0]
|
||||
compiler = fields[1]
|
||||
}
|
||||
return types.SizesFor(compiler, goarch), nil
|
||||
}
|
||||
|
||||
// invokeGo returns the stdout and stderr of a go command invocation.
|
||||
func invokeGo(ctx context.Context, env []string, dir string, usesExportData bool, args ...string) (*bytes.Buffer, *bytes.Buffer, error) {
|
||||
if debug {
|
||||
defer func(start time.Time) { log.Printf("%s for %v", time.Since(start), cmdDebugStr(env, args...)) }(time.Now())
|
||||
}
|
||||
stdout := new(bytes.Buffer)
|
||||
stderr := new(bytes.Buffer)
|
||||
cmd := exec.CommandContext(ctx, "go", args...)
|
||||
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||
// expects the working directory to keep the original path, including the
|
||||
// go command when dealing with modules.
|
||||
// The Go stdlib has a special feature where if the cwd and the PWD are the
|
||||
// same node then it trusts the PWD, so by setting it in the env for the child
|
||||
// process we fix up all the paths returned by the go command.
|
||||
cmd.Env = append(append([]string{}, env...), "PWD="+dir)
|
||||
cmd.Dir = dir
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
exitErr, ok := err.(*exec.ExitError)
|
||||
if !ok {
|
||||
// Catastrophic error:
|
||||
// - executable not found
|
||||
// - context cancellation
|
||||
return nil, nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err)
|
||||
}
|
||||
|
||||
// Export mode entails a build.
|
||||
// If that build fails, errors appear on stderr
|
||||
// (despite the -e flag) and the Export field is blank.
|
||||
// Do not fail in that case.
|
||||
if !usesExportData {
|
||||
return nil, nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
|
||||
}
|
||||
}
|
||||
|
||||
// As of writing, go list -export prints some non-fatal compilation
|
||||
// errors to stderr, even with -e set. We would prefer that it put
|
||||
// them in the Package.Error JSON (see https://golang.org/issue/26319).
|
||||
// In the meantime, there's nowhere good to put them, but they can
|
||||
// be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS
|
||||
// is set.
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(env, args...), stderr)
|
||||
}
|
||||
|
||||
// debugging
|
||||
if false {
|
||||
fmt.Fprintf(os.Stderr, "%s stdout: <<%s>>\n", cmdDebugStr(env, args...), stdout)
|
||||
}
|
||||
|
||||
return stdout, stderr, nil
|
||||
}
|
||||
|
||||
func cmdDebugStr(envlist []string, args ...string) string {
|
||||
env := make(map[string]string)
|
||||
for _, kv := range envlist {
|
||||
split := strings.Split(kv, "=")
|
||||
k, v := split[0], split[1]
|
||||
env[k] = v
|
||||
}
|
||||
|
||||
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], args)
|
||||
}
|
||||
|
7
vendor/golang.org/x/tools/go/packages/BUILD
generated
vendored
@@ -17,9 +17,10 @@ go_library(
|
||||
deps = [
|
||||
"//vendor/golang.org/x/tools/go/gcexportdata:go_default_library",
|
||||
"//vendor/golang.org/x/tools/go/internal/packagesdriver:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/gopathwalk:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/semver:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/span:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/gocommand:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/packagesinternal:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/typesinternal:go_default_library",
|
||||
"//vendor/golang.org/x/xerrors:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
3
vendor/golang.org/x/tools/go/packages/doc.go
generated
vendored
@@ -60,8 +60,7 @@ causes Load to run in LoadFiles mode, collecting minimal information.
|
||||
See the documentation for type Config for details.
|
||||
|
||||
As noted earlier, the Config.Mode controls the amount of detail
|
||||
reported about the loaded packages, with each mode returning all the data of the
|
||||
previous mode with some extra added. See the documentation for type LoadMode
|
||||
reported about the loaded packages. See the documentation for type LoadMode
|
||||
for details.
|
||||
|
||||
Most tools should pass their command-line arguments (after any flags)
|
||||
|
7
vendor/golang.org/x/tools/go/packages/external.go
generated
vendored
@@ -84,13 +84,14 @@ func findExternalDriver(cfg *Config) driver {
|
||||
cmd.Stdin = bytes.NewReader(req)
|
||||
cmd.Stdout = buf
|
||||
cmd.Stderr = stderr
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
|
||||
}
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
||||
}
|
||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
|
||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
|
||||
}
|
||||
|
||||
var response driverResponse
|
||||
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
|
||||
return nil, err
|
||||
|
782
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
File diff suppressed because it is too large
288
vendor/golang.org/x/tools/go/packages/golist_overlay.go
generated
vendored
@@ -1,12 +1,14 @@
|
||||
package packages
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
@@ -16,15 +18,20 @@ import (
|
||||
// sometimes incorrect.
|
||||
// TODO(matloob): Handle unsupported cases, including the following:
|
||||
// - determining the correct package to add given a new import path
|
||||
func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() *goInfo) (modifiedPkgs, needPkgs []string, err error) {
|
||||
func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) {
|
||||
havePkgs := make(map[string]string) // importPath -> non-test package ID
|
||||
needPkgsSet := make(map[string]bool)
|
||||
modifiedPkgsSet := make(map[string]bool)
|
||||
|
||||
pkgOfDir := make(map[string][]*Package)
|
||||
for _, pkg := range response.dr.Packages {
|
||||
// This is an approximation of import path to id. This can be
|
||||
// wrong for tests, vendored packages, and a number of other cases.
|
||||
havePkgs[pkg.PkgPath] = pkg.ID
|
||||
x := commonDir(pkg.GoFiles)
|
||||
if x != "" {
|
||||
pkgOfDir[x] = append(pkgOfDir[x], pkg)
|
||||
}
|
||||
}
|
||||
|
||||
// If no new imports are added, it is safe to avoid loading any needPkgs.
|
||||
@@ -34,7 +41,23 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
|
||||
// potentially modifying the transitive set of dependencies).
|
||||
var overlayAddsImports bool
|
||||
|
||||
for opath, contents := range cfg.Overlay {
|
||||
// If both a package and its test package are created by the overlay, we
|
||||
// need the real package first. Process all non-test files before test
|
||||
// files, and make the whole process deterministic while we're at it.
|
||||
var overlayFiles []string
|
||||
for opath := range state.cfg.Overlay {
|
||||
overlayFiles = append(overlayFiles, opath)
|
||||
}
|
||||
sort.Slice(overlayFiles, func(i, j int) bool {
|
||||
iTest := strings.HasSuffix(overlayFiles[i], "_test.go")
|
||||
jTest := strings.HasSuffix(overlayFiles[j], "_test.go")
|
||||
if iTest != jTest {
|
||||
return !iTest // non-tests are before tests.
|
||||
}
|
||||
return overlayFiles[i] < overlayFiles[j]
|
||||
})
|
||||
for _, opath := range overlayFiles {
|
||||
contents := state.cfg.Overlay[opath]
|
||||
base := filepath.Base(opath)
|
||||
dir := filepath.Dir(opath)
|
||||
var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant
|
||||
@@ -47,6 +70,9 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
|
||||
// to the overlay.
|
||||
continue
|
||||
}
|
||||
// If all the overlay files belong to a different package, change the
|
||||
// package name to that package.
|
||||
maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir])
|
||||
nextPackage:
|
||||
for _, p := range response.dr.Packages {
|
||||
if pkgName != p.Name && p.ID != "command-line-arguments" {
|
||||
@@ -64,14 +90,8 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
|
||||
testVariantOf = p
|
||||
continue nextPackage
|
||||
}
|
||||
// We must have already seen the package of which this is a test variant.
|
||||
if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
|
||||
// If we've already seen the test variant,
|
||||
// make sure to label which package it is a test variant of.
|
||||
if hasTestFiles(pkg) {
|
||||
testVariantOf = p
|
||||
continue nextPackage
|
||||
}
|
||||
// If we have already seen the package of which this is a test variant.
|
||||
if hasTestFiles(p) {
|
||||
testVariantOf = pkg
|
||||
}
|
||||
@@ -86,7 +106,10 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
|
||||
if pkg == nil {
|
||||
// Try to find the module or gopath dir the file is contained in.
|
||||
// Then for modules, add the module opath to the beginning.
|
||||
pkgPath, ok := getPkgPath(cfg, dir, rootDirs)
|
||||
pkgPath, ok, err := state.getPkgPath(dir)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
@@ -98,23 +121,34 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
|
||||
if isTestFile && !isXTest {
|
||||
id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
|
||||
}
|
||||
// Try to reclaim a package with the same id if it exists in the response.
|
||||
// Try to reclaim a package with the same ID, if it exists in the response.
|
||||
for _, p := range response.dr.Packages {
|
||||
if reclaimPackage(p, id, opath, contents) {
|
||||
pkg = p
|
||||
break
|
||||
}
|
||||
}
|
||||
// Otherwise, create a new package
|
||||
// Otherwise, create a new package.
|
||||
if pkg == nil {
|
||||
pkg = &Package{PkgPath: pkgPath, ID: id, Name: pkgName, Imports: make(map[string]*Package)}
|
||||
pkg = &Package{
|
||||
PkgPath: pkgPath,
|
||||
ID: id,
|
||||
Name: pkgName,
|
||||
Imports: make(map[string]*Package),
|
||||
}
|
||||
response.addPackage(pkg)
|
||||
havePkgs[pkg.PkgPath] = id
|
||||
// Add the production package's sources for a test variant.
|
||||
if isTestFile && !isXTest && testVariantOf != nil {
|
||||
pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
|
||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
|
||||
// Add the package under test and its imports to the test variant.
|
||||
pkg.forTest = testVariantOf.PkgPath
|
||||
for k, v := range testVariantOf.Imports {
|
||||
pkg.Imports[k] = &Package{ID: v.ID}
|
||||
}
|
||||
}
|
||||
// TODO(rstambler): Handle forTest for x_tests.
|
||||
}
|
||||
}
|
||||
if !fileExists {
|
||||
@@ -130,42 +164,47 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
|
||||
continue
|
||||
}
|
||||
for _, imp := range imports {
|
||||
_, found := pkg.Imports[imp]
|
||||
if !found {
|
||||
overlayAddsImports = true
|
||||
// TODO(matloob): Handle cases when the following block isn't correct.
|
||||
// These include imports of vendored packages, etc.
|
||||
id, ok := havePkgs[imp]
|
||||
if !ok {
|
||||
id = imp
|
||||
}
|
||||
pkg.Imports[imp] = &Package{ID: id}
|
||||
// Add dependencies to the non-test variant version of this package as wel.
|
||||
if testVariantOf != nil {
|
||||
testVariantOf.Imports[imp] = &Package{ID: id}
|
||||
// TODO(rstambler): If the package is an x test and the import has
|
||||
// a test variant, make sure to replace it.
|
||||
if _, found := pkg.Imports[imp]; found {
|
||||
continue
|
||||
}
|
||||
overlayAddsImports = true
|
||||
id, ok := havePkgs[imp]
|
||||
if !ok {
|
||||
var err error
|
||||
id, err = state.resolveImport(dir, imp)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
pkg.Imports[imp] = &Package{ID: id}
|
||||
// Add dependencies to the non-test variant version of this package as well.
|
||||
if testVariantOf != nil {
|
||||
testVariantOf.Imports[imp] = &Package{ID: id}
|
||||
}
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// toPkgPath tries to guess the package path given the id.
|
||||
// This isn't always correct -- it's certainly wrong for
|
||||
// vendored packages' paths.
|
||||
toPkgPath := func(id string) string {
|
||||
// TODO(matloob): Handle vendor paths.
|
||||
i := strings.IndexByte(id, ' ')
|
||||
if i >= 0 {
|
||||
return id[:i]
|
||||
// toPkgPath guesses the package path given the id.
|
||||
toPkgPath := func(sourceDir, id string) (string, error) {
|
||||
if i := strings.IndexByte(id, ' '); i >= 0 {
|
||||
return state.resolveImport(sourceDir, id[:i])
|
||||
}
|
||||
return id
|
||||
return state.resolveImport(sourceDir, id)
|
||||
}
|
||||
|
||||
// Do another pass now that new packages have been created to determine the
|
||||
// set of missing packages.
|
||||
// Now that new packages have been created, do another pass to determine
|
||||
// the new set of missing packages.
|
||||
for _, pkg := range response.dr.Packages {
|
||||
for _, imp := range pkg.Imports {
|
||||
pkgPath := toPkgPath(imp.ID)
|
||||
if len(pkg.GoFiles) == 0 {
|
||||
return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath)
|
||||
}
|
||||
pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if _, ok := havePkgs[pkgPath]; !ok {
|
||||
needPkgsSet[pkgPath] = true
|
||||
}
|
||||
@@ -185,6 +224,52 @@ func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func(
|
||||
return modifiedPkgs, needPkgs, err
|
||||
}
|
||||
|
||||
// resolveImport finds the ID of a package given its import path.
|
||||
// In particular, it will find the right vendored copy when in GOPATH mode.
|
||||
func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) {
|
||||
env, err := state.getEnv()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if env["GOMOD"] != "" {
|
||||
return importPath, nil
|
||||
}
|
||||
|
||||
searchDir := sourceDir
|
||||
for {
|
||||
vendorDir := filepath.Join(searchDir, "vendor")
|
||||
exists, ok := state.vendorDirs[vendorDir]
|
||||
if !ok {
|
||||
info, err := os.Stat(vendorDir)
|
||||
exists = err == nil && info.IsDir()
|
||||
state.vendorDirs[vendorDir] = exists
|
||||
}
|
||||
|
||||
if exists {
|
||||
vendoredPath := filepath.Join(vendorDir, importPath)
|
||||
if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() {
|
||||
// We should probably check for .go files here, but shame on anyone who fools us.
|
||||
path, ok, err := state.getPkgPath(vendoredPath)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if ok {
|
||||
return path, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We know we've hit the top of the filesystem when we Dir / and get /,
|
||||
// or C:\ and get C:\, etc.
|
||||
next := filepath.Dir(searchDir)
|
||||
if next == searchDir {
|
||||
break
|
||||
}
|
||||
searchDir = next
|
||||
}
|
||||
return importPath, nil
|
||||
}
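A self-contained sketch of the upward vendor-directory walk that resolveImport performs in GOPATH mode (the vendored code above additionally consults go env, caches directory lookups, and converts the result to a package path):

	package sketch

	import (
		"os"
		"path/filepath"
	)

	// findVendored walks from sourceDir toward the filesystem root and returns the
	// first existing vendor/<importPath> directory, or "" if none is found.
	func findVendored(sourceDir, importPath string) string {
		for dir := sourceDir; ; {
			candidate := filepath.Join(dir, "vendor", filepath.FromSlash(importPath))
			if info, err := os.Stat(candidate); err == nil && info.IsDir() {
				return candidate
			}
			next := filepath.Dir(dir)
			if next == dir { // reached "/" or "C:\"
				return ""
			}
			dir = next
		}
	}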
|
||||
|
||||
func hasTestFiles(p *Package) bool {
|
||||
for _, f := range p.GoFiles {
|
||||
if strings.HasSuffix(f, "_test.go") {
|
||||
@@ -194,44 +279,69 @@ func hasTestFiles(p *Package) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// determineRootDirs returns a mapping from directories code can be contained in to the
|
||||
// corresponding import path prefixes of those directories.
|
||||
// Its result is used to try to determine the import path for a package containing
|
||||
// an overlay file.
|
||||
func determineRootDirs(cfg *Config) map[string]string {
|
||||
// Assume modules first:
|
||||
out, err := invokeGo(cfg, "list", "-m", "-json", "all")
|
||||
// determineRootDirs returns a mapping from absolute directories that could
|
||||
// contain code to their corresponding import path prefixes.
|
||||
func (state *golistState) determineRootDirs() (map[string]string, error) {
|
||||
env, err := state.getEnv()
|
||||
if err != nil {
|
||||
return determineRootDirsGOPATH(cfg)
|
||||
return nil, err
|
||||
}
|
||||
if env["GOMOD"] != "" {
|
||||
state.rootsOnce.Do(func() {
|
||||
state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
|
||||
})
|
||||
} else {
|
||||
state.rootsOnce.Do(func() {
|
||||
state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
|
||||
})
|
||||
}
|
||||
return state.rootDirs, state.rootDirsError
|
||||
}
|
||||
|
||||
func (state *golistState) determineRootDirsModules() (map[string]string, error) {
|
||||
// This will only return the root directory for the main module.
|
||||
// For now we only support overlays in main modules.
|
||||
// Editing files in the module cache isn't a great idea, so we don't
|
||||
// plan to ever support that, but editing files in replaced modules
|
||||
// is something we may want to support. To do that, we'll want to
|
||||
// do a go list -m to determine the replaced module's module path and
|
||||
// directory, and then a go list -m {{with .Replace}}{{.Dir}}{{end}} <replaced module's path>
|
||||
// from the main module to determine if that module is actually a replacement.
|
||||
// See bcmills's comment here: https://github.com/golang/go/issues/37629#issuecomment-594179751
|
||||
// for more information.
|
||||
out, err := state.invokeGo("list", "-m", "-json")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := map[string]string{}
|
||||
type jsonMod struct{ Path, Dir string }
|
||||
for dec := json.NewDecoder(out); dec.More(); {
|
||||
mod := new(jsonMod)
|
||||
if err := dec.Decode(mod); err != nil {
|
||||
return m // Give up and return an empty map. Package won't be found for overlay.
|
||||
return nil, err
|
||||
}
|
||||
if mod.Dir != "" && mod.Path != "" {
|
||||
// This is a valid module; add it to the map.
|
||||
m[mod.Dir] = mod.Path
|
||||
absDir, err := filepath.Abs(mod.Dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[absDir] = mod.Path
|
||||
}
|
||||
}
|
||||
return m
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func determineRootDirsGOPATH(cfg *Config) map[string]string {
|
||||
func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
|
||||
m := map[string]string{}
|
||||
out, err := invokeGo(cfg, "env", "GOPATH")
|
||||
if err != nil {
|
||||
// Could not determine root dir mapping. Everything is best-effort, so just return an empty map.
|
||||
// When we try to find the import path for a directory, there will be no root-dir match and
|
||||
// we'll give up.
|
||||
return m
|
||||
for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
|
||||
absDir, err := filepath.Abs(dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m[filepath.Join(absDir, "src")] = ""
|
||||
}
|
||||
for _, p := range filepath.SplitList(string(bytes.TrimSpace(out.Bytes()))) {
|
||||
m[filepath.Join(p, "src")] = ""
|
||||
}
|
||||
return m
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func extractImports(filename string, contents []byte) ([]string, error) {
|
||||
@@ -291,3 +401,57 @@ func extractPackageName(filename string, contents []byte) (string, bool) {
|
||||
}
|
||||
return f.Name.Name, true
|
||||
}
|
||||
|
||||
func commonDir(a []string) string {
|
||||
seen := make(map[string]bool)
|
||||
x := append([]string{}, a...)
|
||||
for _, f := range x {
|
||||
seen[filepath.Dir(f)] = true
|
||||
}
|
||||
if len(seen) > 1 {
|
||||
log.Fatalf("commonDir saw %v for %v", seen, x)
|
||||
}
|
||||
for k := range seen {
|
||||
// len(seen) == 1
|
||||
return k
|
||||
}
|
||||
return "" // no files
|
||||
}
|
||||
|
||||
// It is possible that the files in the disk directory dir have a different package
// name from newName, which is deduced from the overlays. If they all share a single
// package name that differs from newName, then newName becomes the package name.
|
||||
// The rename is applied in place to the packages in pkgsOfDir.
|
||||
func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) {
|
||||
names := make(map[string]int)
|
||||
for _, p := range pkgsOfDir {
|
||||
names[p.Name]++
|
||||
}
|
||||
if len(names) != 1 {
|
||||
// some files are in different packages
|
||||
return
|
||||
}
|
||||
var oldName string
|
||||
for k := range names {
|
||||
oldName = k
|
||||
}
|
||||
if newName == oldName {
|
||||
return
|
||||
}
|
||||
// We might have a case where all of the package names in the directory are
|
||||
// the same, but the overlay file is for an x test, which belongs to its
|
||||
// own package. If the x test does not yet exist on disk, we may not yet
|
||||
// have its package name on disk, but we should not rename the packages.
|
||||
//
|
||||
// We use a heuristic to determine if this file belongs to an x test:
|
||||
// The test file should have a package name that has a _test
|
||||
// suffix or looks like "newName_test".
|
||||
maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test")
|
||||
if isTestFile && maybeXTest {
|
||||
return
|
||||
}
|
||||
for _, p := range pkgsOfDir {
|
||||
p.Name = newName
|
||||
}
|
||||
}
|
||||
|
2
vendor/golang.org/x/tools/go/packages/loadmode_string.go
generated
vendored
@@ -38,7 +38,7 @@ var modeStrings = []string{
|
||||
func (mod LoadMode) String() string {
|
||||
m := mod
|
||||
if m == 0 {
|
||||
return fmt.Sprintf("LoadMode(0)")
|
||||
return "LoadMode(0)"
|
||||
}
|
||||
var out []string
|
||||
for i, x := range allModes {
|
||||
|
118
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
@@ -21,8 +21,12 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/go/gcexportdata"
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/packagesinternal"
|
||||
"golang.org/x/tools/internal/typesinternal"
|
||||
)
|
||||
|
||||
// A LoadMode controls the amount of detail to return when loading.
|
||||
@@ -34,6 +38,9 @@ import (
|
||||
// Load may return more information than requested.
|
||||
type LoadMode int
|
||||
|
||||
// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to
|
||||
// NeedExportFile to make it consistent with the Package field it's adding.
|
||||
|
||||
const (
|
||||
// NeedName adds Name and PkgPath.
|
||||
NeedName LoadMode = 1 << iota
|
||||
@@ -51,7 +58,7 @@ const (
|
||||
// NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
|
||||
NeedDeps
|
||||
|
||||
// NeedExportsFile adds ExportsFile.
|
||||
// NeedExportsFile adds ExportFile.
|
||||
NeedExportsFile
|
||||
|
||||
// NeedTypes adds Types, Fset, and IllTyped.
|
||||
@@ -65,6 +72,13 @@ const (
|
||||
|
||||
// NeedTypesSizes adds TypesSizes.
|
||||
NeedTypesSizes
|
||||
|
||||
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
|
||||
// Modifies CompiledGoFiles and Types, and has no effect on its own.
|
||||
typecheckCgo
|
||||
|
||||
// NeedModule adds Module.
|
||||
NeedModule
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -123,6 +137,9 @@ type Config struct {
|
||||
//
|
||||
Env []string
|
||||
|
||||
// gocmdRunner guards go command calls from concurrency errors.
|
||||
gocmdRunner *gocommand.Runner
|
||||
|
||||
// BuildFlags is a list of command-line flags to be passed through to
|
||||
// the build system's query tool.
|
||||
BuildFlags []string
|
||||
@@ -160,7 +177,7 @@ type Config struct {
|
||||
Tests bool
|
||||
|
||||
// Overlay provides a mapping of absolute file paths to file contents.
|
||||
// If the file with the given path already exists, the parser will use the
|
||||
// If the file with the given path already exists, the parser will use the
|
||||
// alternative file contents provided by the map.
|
||||
//
|
||||
// Overlays provide incomplete support for when a given file doesn't
|
||||
@@ -174,6 +191,13 @@ type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
|
||||
|
||||
// driverResponse contains the results for a driver query.
|
||||
type driverResponse struct {
|
||||
// NotHandled is returned if the request can't be handled by the current
|
||||
// driver. If an external driver returns a response with NotHandled, the
|
||||
// rest of the driverResponse is ignored, and go/packages will fallback
|
||||
// to the next driver. If go/packages is extended in the future to support
|
||||
// lists of multiple drivers, go/packages will fall back to the next driver.
|
||||
NotHandled bool
|
||||
|
||||
// Sizes, if not nil, is the types.Sizes to use when type checking.
|
||||
Sizes *types.StdSizes
|
||||
|
||||
@@ -215,14 +239,22 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) {
|
||||
return l.refine(response.Roots, response.Packages...)
|
||||
}
|
||||
|
||||
// defaultDriver is a driver that looks for an external driver binary, and if
|
||||
// it does not find it falls back to the built in go list driver.
|
||||
// defaultDriver is a driver that implements go/packages' fallback behavior.
|
||||
// It will try to request to an external driver, if one exists. If there's
|
||||
// no external driver, or the driver returns a response with NotHandled set,
|
||||
// defaultDriver will fall back to the go list driver.
|
||||
func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
||||
driver := findExternalDriver(cfg)
|
||||
if driver == nil {
|
||||
driver = goListDriver
|
||||
}
|
||||
return driver(cfg, patterns...)
|
||||
response, err := driver(cfg, patterns...)
|
||||
if err != nil {
|
||||
return response, err
|
||||
} else if response.NotHandled {
|
||||
return goListDriver(cfg, patterns...)
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// A Package describes a loaded Go package.
|
||||
@@ -249,7 +281,7 @@ type Package struct {
|
||||
GoFiles []string
|
||||
|
||||
// CompiledGoFiles lists the absolute file paths of the package's source
|
||||
// files that were presented to the compiler.
|
||||
// files that are suitable for type checking.
|
||||
// This may differ from GoFiles if files are processed before compilation.
|
||||
CompiledGoFiles []string
|
||||
|
||||
@@ -292,6 +324,44 @@ type Package struct {
|
||||
|
||||
// TypesSizes provides the effective size function for types in TypesInfo.
|
||||
TypesSizes types.Sizes
|
||||
|
||||
// forTest is the package under test, if any.
|
||||
forTest string
|
||||
|
||||
// module is the module information for the package if it exists.
|
||||
Module *Module
|
||||
}
|
||||
|
||||
// Module provides module information for a package.
|
||||
type Module struct {
|
||||
Path string // module path
|
||||
Version string // module version
|
||||
Replace *Module // replaced by this module
|
||||
Time *time.Time // time version was created
|
||||
Main bool // is this the main module?
|
||||
Indirect bool // is this module only an indirect dependency of main module?
|
||||
Dir string // directory holding files for this module, if any
|
||||
GoMod string // path to go.mod file used when loading this module, if any
|
||||
GoVersion string // go version used in module
|
||||
Error *ModuleError // error loading module
|
||||
}
|
||||
|
||||
// ModuleError holds errors loading a module.
|
||||
type ModuleError struct {
|
||||
Err string // the error itself
|
||||
}
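A hedged example of requesting the new Module data through the public go/packages API (pattern and output formatting are illustrative; assumes imports of golang.org/x/tools/go/packages, fmt, and log):

	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedModule}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		if p.Module != nil {
			fmt.Printf("%s -> %s@%s\n", p.PkgPath, p.Module.Path, p.Module.Version)
		}
	}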
|
||||
|
||||
func init() {
|
||||
packagesinternal.GetForTest = func(p interface{}) string {
|
||||
return p.(*Package).forTest
|
||||
}
|
||||
packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner {
|
||||
return config.(*Config).gocmdRunner
|
||||
}
|
||||
packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {
|
||||
config.(*Config).gocmdRunner = runner
|
||||
}
|
||||
packagesinternal.TypecheckCgo = int(typecheckCgo)
|
||||
}
|
||||
|
||||
// An Error describes a problem with a package's metadata, syntax, or types.
|
||||
@@ -454,6 +524,9 @@ func newLoader(cfg *Config) *loader {
|
||||
if ld.Config.Env == nil {
|
||||
ld.Config.Env = os.Environ()
|
||||
}
|
||||
if ld.Config.gocmdRunner == nil {
|
||||
ld.Config.gocmdRunner = &gocommand.Runner{}
|
||||
}
|
||||
if ld.Context == nil {
|
||||
ld.Context = context.Background()
|
||||
}
|
||||
@@ -500,12 +573,23 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
if i, found := rootMap[pkg.ID]; found {
|
||||
rootIndex = i
|
||||
}
|
||||
|
||||
// Overlays can invalidate export data.
|
||||
// TODO(matloob): make this check fine-grained based on dependencies on overlaid files
|
||||
exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
|
||||
// This package needs type information if the caller requested types and the package is
|
||||
// either a root, or it's a non-root and the user requested dependencies ...
|
||||
needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
|
||||
// This package needs source if the call requested source (or types info, which implies source)
|
||||
// and the package is either a root, or it's a non-root and the user requested dependencies...
|
||||
needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
|
||||
// ... or if we need types and the exportData is invalid. We fall back to (incompletely)
|
||||
// typechecking packages from source if they fail to compile.
|
||||
(ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
|
||||
lpkg := &loaderPackage{
|
||||
Package: pkg,
|
||||
needtypes: (ld.Mode&(NeedTypes|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0,
|
||||
needsrc: (ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && ld.Mode&NeedDeps != 0 && rootIndex < 0) || rootIndex >= 0 ||
|
||||
len(ld.Overlay) > 0 || // Overlays can invalidate export data. TODO(matloob): make this check fine-grained based on dependencies on overlaid files
|
||||
pkg.ExportFile == "" && pkg.PkgPath != "unsafe",
|
||||
needtypes: needtypes,
|
||||
needsrc: needsrc,
|
||||
}
|
||||
ld.pkgs[lpkg.ID] = lpkg
|
||||
if rootIndex >= 0 {
|
||||
@@ -660,6 +744,9 @@ func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
|
||||
if ld.requestedMode&NeedTypesSizes == 0 {
|
||||
ld.pkgs[i].TypesSizes = nil
|
||||
}
|
||||
if ld.requestedMode&NeedModule == 0 {
|
||||
ld.pkgs[i].Module = nil
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
@@ -713,7 +800,7 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
// which would then require that such created packages be explicitly
|
||||
// inserted back into the Import graph as a final step after export data loading.
|
||||
// The Diamond test exercises this case.
|
||||
if !lpkg.needtypes {
|
||||
if !lpkg.needtypes && !lpkg.needsrc {
|
||||
return
|
||||
}
|
||||
if !lpkg.needsrc {
|
||||
@@ -835,6 +922,15 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
|
||||
Error: appendError,
|
||||
Sizes: ld.sizes,
|
||||
}
|
||||
if (ld.Mode & typecheckCgo) != 0 {
|
||||
if !typesinternal.SetUsesCgo(tc) {
|
||||
appendError(Error{
|
||||
Msg: "typecheckCgo requires Go 1.15+",
|
||||
Kind: ListError,
|
||||
})
|
||||
return
|
||||
}
|
||||
}
|
||||
types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
|
||||
|
||||
lpkg.importErrors = nil // no longer needed
|
||||
|
10
vendor/golang.org/x/tools/imports/forward.go
generated
vendored
10
vendor/golang.org/x/tools/imports/forward.go
generated
vendored
@@ -4,6 +4,8 @@ package imports // import "golang.org/x/tools/imports"
|
||||
|
||||
import (
|
||||
"go/build"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
intimp "golang.org/x/tools/internal/imports"
|
||||
)
|
||||
@@ -42,7 +44,10 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
||||
Env: &intimp.ProcessEnv{
|
||||
GOPATH: build.Default.GOPATH,
|
||||
GOROOT: build.Default.GOROOT,
|
||||
Debug: Debug,
|
||||
GOFLAGS: os.Getenv("GOFLAGS"),
|
||||
GO111MODULE: os.Getenv("GO111MODULE"),
|
||||
GOPROXY: os.Getenv("GOPROXY"),
|
||||
GOSUMDB: os.Getenv("GOSUMDB"),
|
||||
LocalPrefix: LocalPrefix,
|
||||
},
|
||||
AllErrors: opt.AllErrors,
|
||||
@@ -52,6 +57,9 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) {
|
||||
TabIndent: opt.TabIndent,
|
||||
TabWidth: opt.TabWidth,
|
||||
}
|
||||
if Debug {
|
||||
intopt.Env.Logf = log.Printf
|
||||
}
|
||||
return intimp.Process(filename, src, intopt)
|
||||
}
|
||||
|
||||
|
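The forward.go change above threads GOFLAGS, GO111MODULE, GOPROXY and GOSUMDB from the environment into the internal ProcessEnv. A small usage sketch of the public imports.Process entry point that this wrapper serves (file name and source are illustrative):

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/imports"
)

func main() {
	src := []byte("package demo\n\nfunc Hello() { fmt.Println(\"hi\") }\n")

	// Group imports under this prefix separately from third-party ones.
	imports.LocalPrefix = "example.com/demo"

	out, err := imports.Process("demo.go", src, &imports.Options{
		Comments:  true,
		TabIndent: true,
		TabWidth:  8,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Print(string(out)) // the missing "fmt" import has been added
}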
36
vendor/golang.org/x/tools/internal/event/BUILD
generated
vendored
Normal file
36
vendor/golang.org/x/tools/internal/event/BUILD
generated
vendored
Normal file
@@ -0,0 +1,36 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"doc.go",
|
||||
"event.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/event",
|
||||
importpath = "golang.org/x/tools/internal/event",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
deps = [
|
||||
"//vendor/golang.org/x/tools/internal/event/core:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/event/keys:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/event/label:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [
|
||||
":package-srcs",
|
||||
"//vendor/golang.org/x/tools/internal/event/core:all-srcs",
|
||||
"//vendor/golang.org/x/tools/internal/event/keys:all-srcs",
|
||||
"//vendor/golang.org/x/tools/internal/event/label:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
31
vendor/golang.org/x/tools/internal/event/core/BUILD
generated
vendored
Normal file
31
vendor/golang.org/x/tools/internal/event/core/BUILD
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"event.go",
|
||||
"export.go",
|
||||
"fast.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/event/core",
|
||||
importpath = "golang.org/x/tools/internal/event/core",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
deps = [
|
||||
"//vendor/golang.org/x/tools/internal/event/keys:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/event/label:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
85
vendor/golang.org/x/tools/internal/event/core/event.go
generated
vendored
Normal file
85
vendor/golang.org/x/tools/internal/event/core/event.go
generated
vendored
Normal file
@@ -0,0 +1,85 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package core provides support for event based telemetry.
|
||||
package core
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Event holds the information about an event of note that occurred.
|
||||
type Event struct {
|
||||
at time.Time
|
||||
|
||||
// As events are often on the stack, storing the first few labels directly
|
||||
// in the event can avoid an allocation at all for the very common cases of
|
||||
// simple events.
|
||||
// The length needs to be large enough to cope with the majority of events
|
||||
// but not so large as to cause undue stack pressure.
|
||||
// A log message with two values will use 3 labels (one for each value and
|
||||
// one for the message itself).
|
||||
|
||||
static [3]label.Label // inline storage for the first few labels
|
||||
dynamic []label.Label // dynamically sized storage for remaining labels
|
||||
}
|
||||
|
||||
// eventLabelMap implements label.Map for the labels of an Event.
|
||||
type eventLabelMap struct {
|
||||
event Event
|
||||
}
|
||||
|
||||
func (ev Event) At() time.Time { return ev.at }
|
||||
|
||||
func (ev Event) Format(f fmt.State, r rune) {
|
||||
if !ev.at.IsZero() {
|
||||
fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
|
||||
}
|
||||
for index := 0; ev.Valid(index); index++ {
|
||||
if l := ev.Label(index); l.Valid() {
|
||||
fmt.Fprintf(f, "\n\t%v", l)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ev Event) Valid(index int) bool {
|
||||
return index >= 0 && index < len(ev.static)+len(ev.dynamic)
|
||||
}
|
||||
|
||||
func (ev Event) Label(index int) label.Label {
|
||||
if index < len(ev.static) {
|
||||
return ev.static[index]
|
||||
}
|
||||
return ev.dynamic[index-len(ev.static)]
|
||||
}
|
||||
|
||||
func (ev Event) Find(key label.Key) label.Label {
|
||||
for _, l := range ev.static {
|
||||
if l.Key() == key {
|
||||
return l
|
||||
}
|
||||
}
|
||||
for _, l := range ev.dynamic {
|
||||
if l.Key() == key {
|
||||
return l
|
||||
}
|
||||
}
|
||||
return label.Label{}
|
||||
}
|
||||
|
||||
func MakeEvent(static [3]label.Label, labels []label.Label) Event {
|
||||
return Event{
|
||||
static: static,
|
||||
dynamic: labels,
|
||||
}
|
||||
}
|
||||
|
||||
// CloneEvent returns a copy of the event with the time adjusted to at.
|
||||
func CloneEvent(ev Event, at time.Time) Event {
|
||||
ev.at = at
|
||||
return ev
|
||||
}
|
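The static/dynamic split above is what keeps small events allocation-free. A sketch of how the accessors compose; note these packages are internal to golang.org/x/tools, so this compiles only inside that module, and the key names are illustrative:

package main

import (
	"fmt"

	"golang.org/x/tools/internal/event/core"
	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

var (
	msg   = keys.NewString("message", "a readable message")
	count = keys.NewInt("count", "an item count")
)

func main() {
	// The first three labels live in the stack-friendly static array;
	// anything beyond that would spill into the dynamic slice.
	ev := core.MakeEvent([3]label.Label{
		msg.Of("loaded packages"),
		count.Of(42),
	}, nil)

	for i := 0; ev.Valid(i); i++ {
		if l := ev.Label(i); l.Valid() {
			fmt.Printf("%v\n", l)
		}
	}
	fmt.Println("count =", count.From(ev.Find(count)))
}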
70
vendor/golang.org/x/tools/internal/event/core/export.go
generated
vendored
Normal file
70
vendor/golang.org/x/tools/internal/event/core/export.go
generated
vendored
Normal file
@@ -0,0 +1,70 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Exporter is a function that handles events.
|
||||
// It may return a modified context and event.
|
||||
type Exporter func(context.Context, Event, label.Map) context.Context
|
||||
|
||||
var (
|
||||
exporter unsafe.Pointer
|
||||
)
|
||||
|
||||
// SetExporter sets the global exporter function that handles all events.
|
||||
// The exporter is called synchronously from the event call site, so it should
|
||||
// return quickly so as not to hold up user code.
|
||||
func SetExporter(e Exporter) {
|
||||
p := unsafe.Pointer(&e)
|
||||
if e == nil {
|
||||
// &e is always valid, and so p is always valid, but for the early abort
|
||||
// of Export to be efficient it needs to make the nil check on the
|
||||
// pointer without having to dereference it, so we make the nil function
|
||||
// also a nil pointer
|
||||
p = nil
|
||||
}
|
||||
atomic.StorePointer(&exporter, p)
|
||||
}
|
||||
|
||||
// deliver is called to deliver an event to the supplied exporter.
|
||||
// it will fill in the time.
|
||||
func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
|
||||
// add the current time to the event
|
||||
ev.at = time.Now()
|
||||
// hand the event off to the current exporter
|
||||
return exporter(ctx, ev, ev)
|
||||
}
|
||||
|
||||
// Export is called to deliver an event to the global exporter if set.
|
||||
func Export(ctx context.Context, ev Event) context.Context {
|
||||
// get the global exporter and abort early if there is not one
|
||||
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
|
||||
if exporterPtr == nil {
|
||||
return ctx
|
||||
}
|
||||
return deliver(ctx, *exporterPtr, ev)
|
||||
}
|
||||
|
||||
// ExportPair is called to deliver a start event to the supplied exporter.
|
||||
// It also returns a function that will deliver the end event to the same
|
||||
// exporter.
|
||||
// It will fill in the time.
|
||||
func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
|
||||
// get the global exporter and abort early if there is not one
|
||||
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
|
||||
if exporterPtr == nil {
|
||||
return ctx, func() {}
|
||||
}
|
||||
ctx = deliver(ctx, *exporterPtr, begin)
|
||||
return ctx, func() { deliver(ctx, *exporterPtr, end) }
|
||||
}
|
77
vendor/golang.org/x/tools/internal/event/core/fast.go
generated
vendored
Normal file
77
vendor/golang.org/x/tools/internal/event/core/fast.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package core
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"golang.org/x/tools/internal/event/keys"
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Log1 takes a message and one label and delivers a log event to the exporter.
|
||||
// It is a customized version of Print that is faster and does no allocation.
|
||||
func Log1(ctx context.Context, message string, t1 label.Label) {
|
||||
Export(ctx, MakeEvent([3]label.Label{
|
||||
keys.Msg.Of(message),
|
||||
t1,
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Log2 takes a message and two labels and delivers a log event to the exporter.
|
||||
// It is a customized version of Print that is faster and does no allocation.
|
||||
func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
|
||||
Export(ctx, MakeEvent([3]label.Label{
|
||||
keys.Msg.Of(message),
|
||||
t1,
|
||||
t2,
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Metric1 sends a label event to the exporter with the supplied labels.
|
||||
func Metric1(ctx context.Context, t1 label.Label) context.Context {
|
||||
return Export(ctx, MakeEvent([3]label.Label{
|
||||
keys.Metric.New(),
|
||||
t1,
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Metric2 sends a label event to the exporter with the supplied labels.
|
||||
func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
|
||||
return Export(ctx, MakeEvent([3]label.Label{
|
||||
keys.Metric.New(),
|
||||
t1,
|
||||
t2,
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Start1 sends a span start event with the supplied label list to the exporter.
|
||||
// It also returns a function that will end the span, which should normally be
|
||||
// deferred.
|
||||
func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
|
||||
return ExportPair(ctx,
|
||||
MakeEvent([3]label.Label{
|
||||
keys.Start.Of(name),
|
||||
t1,
|
||||
}, nil),
|
||||
MakeEvent([3]label.Label{
|
||||
keys.End.New(),
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// Start2 sends a span start event with the supplied label list to the exporter.
|
||||
// It also returns a function that will end the span, which should normally be
|
||||
// deferred.
|
||||
func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
|
||||
return ExportPair(ctx,
|
||||
MakeEvent([3]label.Label{
|
||||
keys.Start.Of(name),
|
||||
t1,
|
||||
t2,
|
||||
}, nil),
|
||||
MakeEvent([3]label.Label{
|
||||
keys.End.New(),
|
||||
}, nil))
|
||||
}
|
7
vendor/golang.org/x/tools/internal/event/doc.go
generated
vendored
Normal file
7
vendor/golang.org/x/tools/internal/event/doc.go
generated
vendored
Normal file
@@ -0,0 +1,7 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package event provides a set of packages that cover the main
// concepts of telemetry in an implementation agnostic way.
package event
127
vendor/golang.org/x/tools/internal/event/event.go
generated
vendored
Normal file
127
vendor/golang.org/x/tools/internal/event/event.go
generated
vendored
Normal file
@@ -0,0 +1,127 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package event
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"golang.org/x/tools/internal/event/core"
|
||||
"golang.org/x/tools/internal/event/keys"
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Exporter is a function that handles events.
|
||||
// It may return a modified context and event.
|
||||
type Exporter func(context.Context, core.Event, label.Map) context.Context
|
||||
|
||||
// SetExporter sets the global exporter function that handles all events.
|
||||
// The exporter is called synchronously from the event call site, so it should
|
||||
// return quickly so as not to hold up user code.
|
||||
func SetExporter(e Exporter) {
|
||||
core.SetExporter(core.Exporter(e))
|
||||
}
|
||||
|
||||
// Log takes a message and a label list and combines them into a single event
|
||||
// before delivering them to the exporter.
|
||||
func Log(ctx context.Context, message string, labels ...label.Label) {
|
||||
core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Msg.Of(message),
|
||||
}, labels))
|
||||
}
|
||||
|
||||
// IsLog returns true if the event was built by the Log function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsLog(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Msg
|
||||
}
|
||||
|
||||
// Error takes a message and a label list and combines them into a single event
|
||||
// before delivering them to the exporter. It captures the error in the
|
||||
// delivered event.
|
||||
func Error(ctx context.Context, message string, err error, labels ...label.Label) {
|
||||
core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Msg.Of(message),
|
||||
keys.Err.Of(err),
|
||||
}, labels))
|
||||
}
|
||||
|
||||
// IsError returns true if the event was built by the Error function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsError(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Msg &&
|
||||
ev.Label(1).Key() == keys.Err
|
||||
}
|
||||
|
||||
// Metric sends a label event to the exporter with the supplied labels.
|
||||
func Metric(ctx context.Context, labels ...label.Label) {
|
||||
core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Metric.New(),
|
||||
}, labels))
|
||||
}
|
||||
|
||||
// IsMetric returns true if the event was built by the Metric function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsMetric(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Metric
|
||||
}
|
||||
|
||||
// Label sends a label event to the exporter with the supplied labels.
|
||||
func Label(ctx context.Context, labels ...label.Label) context.Context {
|
||||
return core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Label.New(),
|
||||
}, labels))
|
||||
}
|
||||
|
||||
// IsLabel returns true if the event was built by the Label function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsLabel(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Label
|
||||
}
|
||||
|
||||
// Start sends a span start event with the supplied label list to the exporter.
|
||||
// It also returns a function that will end the span, which should normally be
|
||||
// deferred.
|
||||
func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
|
||||
return core.ExportPair(ctx,
|
||||
core.MakeEvent([3]label.Label{
|
||||
keys.Start.Of(name),
|
||||
}, labels),
|
||||
core.MakeEvent([3]label.Label{
|
||||
keys.End.New(),
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// IsStart returns true if the event was built by the Start function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsStart(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Start
|
||||
}
|
||||
|
||||
// IsEnd returns true if the event was built by the End function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsEnd(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.End
|
||||
}
|
||||
|
||||
// Detach returns a context without an associated span.
|
||||
// This allows the creation of spans that are not children of the current span.
|
||||
func Detach(ctx context.Context) context.Context {
|
||||
return core.Export(ctx, core.MakeEvent([3]label.Label{
|
||||
keys.Detach.New(),
|
||||
}, nil))
|
||||
}
|
||||
|
||||
// IsDetach returns true if the event was built by the Detach function.
|
||||
// It is intended to be used in exporters to identify the semantics of the
|
||||
// event when deciding what to do with it.
|
||||
func IsDetach(ev core.Event) bool {
|
||||
return ev.Label(0).Key() == keys.Detach
|
||||
}
|
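A sketch tying the pieces together: registering an exporter and emitting log and span events. Again, these are internal x/tools packages, so this is illustrative rather than a supported external API:

package main

import (
	"context"
	"fmt"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/event/core"
	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

func main() {
	// A tiny exporter that prints log events and span boundaries; a real
	// exporter would route these to tracing or metrics backends.
	event.SetExporter(func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
		switch {
		case event.IsLog(ev):
			fmt.Println("log:", keys.Msg.Get(lm))
		case event.IsStart(ev):
			fmt.Println("span start:", keys.Start.Get(lm))
		case event.IsEnd(ev):
			fmt.Println("span end")
		}
		return ctx
	})

	ctx := context.Background()
	ctx, done := event.Start(ctx, "load")
	event.Log(ctx, "loading packages", keys.NewInt("count", "package count").Of(3))
	done()
}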
@@ -3,17 +3,13 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"parse.go",
|
||||
"span.go",
|
||||
"token.go",
|
||||
"token111.go",
|
||||
"token112.go",
|
||||
"uri.go",
|
||||
"utf16.go",
|
||||
"keys.go",
|
||||
"standard.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/span",
|
||||
importpath = "golang.org/x/tools/internal/span",
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/event/keys",
|
||||
importpath = "golang.org/x/tools/internal/event/keys",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
deps = ["//vendor/golang.org/x/tools/internal/event/label:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
564
vendor/golang.org/x/tools/internal/event/keys/keys.go
generated
vendored
Normal file
564
vendor/golang.org/x/tools/internal/event/keys/keys.go
generated
vendored
Normal file
@@ -0,0 +1,564 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package keys
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/tools/internal/event/label"
|
||||
)
|
||||
|
||||
// Value represents a key for untyped values.
|
||||
type Value struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// New creates a new Key for untyped values.
|
||||
func New(name, description string) *Value {
|
||||
return &Value{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Value) Name() string { return k.name }
|
||||
func (k *Value) Description() string { return k.description }
|
||||
|
||||
func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
fmt.Fprint(w, k.From(l))
|
||||
}
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Value) Get(lm label.Map) interface{} {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
|
||||
|
||||
// Tag represents a key for tagging labels that have no value.
|
||||
// These are used when the existence of the label is the entire information it
|
||||
// carries, such as marking events to be of a specific kind, or from a specific
|
||||
// package.
|
||||
type Tag struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewTag creates a new Key for tagging labels.
|
||||
func NewTag(name, description string) *Tag {
|
||||
return &Tag{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Tag) Name() string { return k.name }
|
||||
func (k *Tag) Description() string { return k.description }
|
||||
|
||||
func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {}
|
||||
|
||||
// New creates a new Label with this key.
|
||||
func (k *Tag) New() label.Label { return label.OfValue(k, nil) }
|
||||
|
||||
// Int represents a key
|
||||
type Int struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt creates a new Key for int values.
|
||||
func NewInt(name, description string) *Int {
|
||||
return &Int{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int) Name() string { return k.name }
|
||||
func (k *Int) Description() string { return k.description }
|
||||
|
||||
func (k *Int) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int) Get(lm label.Map) int {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int) From(t label.Label) int { return int(t.Unpack64()) }
|
||||
|
||||
// Int8 represents a key
|
||||
type Int8 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt8 creates a new Key for int8 values.
|
||||
func NewInt8(name, description string) *Int8 {
|
||||
return &Int8{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int8) Name() string { return k.name }
|
||||
func (k *Int8) Description() string { return k.description }
|
||||
|
||||
func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int8) Get(lm label.Map) int8 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) }
|
||||
|
||||
// Int16 represents a key
|
||||
type Int16 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt16 creates a new Key for int16 values.
|
||||
func NewInt16(name, description string) *Int16 {
|
||||
return &Int16{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int16) Name() string { return k.name }
|
||||
func (k *Int16) Description() string { return k.description }
|
||||
|
||||
func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int16) Get(lm label.Map) int16 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) }
|
||||
|
||||
// Int32 represents a key
|
||||
type Int32 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt32 creates a new Key for int32 values.
|
||||
func NewInt32(name, description string) *Int32 {
|
||||
return &Int32{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int32) Name() string { return k.name }
|
||||
func (k *Int32) Description() string { return k.description }
|
||||
|
||||
func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int32) Get(lm label.Map) int32 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) }
|
||||
|
||||
// Int64 represents a key
|
||||
type Int64 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewInt64 creates a new Key for int64 values.
|
||||
func NewInt64(name, description string) *Int64 {
|
||||
return &Int64{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Int64) Name() string { return k.name }
|
||||
func (k *Int64) Description() string { return k.description }
|
||||
|
||||
func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendInt(buf, k.From(l), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Int64) Get(lm label.Map) int64 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) }
|
||||
|
||||
// UInt represents a key
|
||||
type UInt struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt creates a new Key for uint values.
|
||||
func NewUInt(name, description string) *UInt {
|
||||
return &UInt{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt) Name() string { return k.name }
|
||||
func (k *UInt) Description() string { return k.description }
|
||||
|
||||
func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt) Get(lm label.Map) uint {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) }
|
||||
|
||||
// UInt8 represents a key
|
||||
type UInt8 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt8 creates a new Key for uint8 values.
|
||||
func NewUInt8(name, description string) *UInt8 {
|
||||
return &UInt8{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt8) Name() string { return k.name }
|
||||
func (k *UInt8) Description() string { return k.description }
|
||||
|
||||
func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt8) Get(lm label.Map) uint8 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) }
|
||||
|
||||
// UInt16 represents a key
|
||||
type UInt16 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt16 creates a new Key for uint16 values.
|
||||
func NewUInt16(name, description string) *UInt16 {
|
||||
return &UInt16{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt16) Name() string { return k.name }
|
||||
func (k *UInt16) Description() string { return k.description }
|
||||
|
||||
func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt16) Get(lm label.Map) uint16 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
|
||||
|
||||
// UInt32 represents a key
|
||||
type UInt32 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt32 creates a new Key for uint32 values.
|
||||
func NewUInt32(name, description string) *UInt32 {
|
||||
return &UInt32{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt32) Name() string { return k.name }
|
||||
func (k *UInt32) Description() string { return k.description }
|
||||
|
||||
func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt32) Get(lm label.Map) uint32 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
|
||||
|
||||
// UInt64 represents a key
|
||||
type UInt64 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewUInt64 creates a new Key for uint64 values.
|
||||
func NewUInt64(name, description string) *UInt64 {
|
||||
return &UInt64{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *UInt64) Name() string { return k.name }
|
||||
func (k *UInt64) Description() string { return k.description }
|
||||
|
||||
func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendUint(buf, k.From(l), 10))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *UInt64) Get(lm label.Map) uint64 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
|
||||
|
||||
// Float32 represents a key
|
||||
type Float32 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewFloat32 creates a new Key for float32 values.
|
||||
func NewFloat32(name, description string) *Float32 {
|
||||
return &Float32{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Float32) Name() string { return k.name }
|
||||
func (k *Float32) Description() string { return k.description }
|
||||
|
||||
func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Float32) Of(v float32) label.Label {
|
||||
return label.Of64(k, uint64(math.Float32bits(v)))
|
||||
}
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Float32) Get(lm label.Map) float32 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Float32) From(t label.Label) float32 {
|
||||
return math.Float32frombits(uint32(t.Unpack64()))
|
||||
}
|
||||
|
||||
// Float64 represents a key
|
||||
type Float64 struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewFloat64 creates a new Key for float64 values.
|
||||
func NewFloat64(name, description string) *Float64 {
|
||||
return &Float64{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Float64) Name() string { return k.name }
|
||||
func (k *Float64) Description() string { return k.description }
|
||||
|
||||
func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Float64) Of(v float64) label.Label {
|
||||
return label.Of64(k, math.Float64bits(v))
|
||||
}
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Float64) Get(lm label.Map) float64 {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Float64) From(t label.Label) float64 {
|
||||
return math.Float64frombits(t.Unpack64())
|
||||
}
|
||||
|
||||
// String represents a key
|
||||
type String struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewString creates a new Key for string values.
|
||||
func NewString(name, description string) *String {
|
||||
return &String{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *String) Name() string { return k.name }
|
||||
func (k *String) Description() string { return k.description }
|
||||
|
||||
func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendQuote(buf, k.From(l)))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *String) Get(lm label.Map) string {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *String) From(t label.Label) string { return t.UnpackString() }
|
||||
|
||||
// Boolean represents a key
|
||||
type Boolean struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewBoolean creates a new Key for bool values.
|
||||
func NewBoolean(name, description string) *Boolean {
|
||||
return &Boolean{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Boolean) Name() string { return k.name }
|
||||
func (k *Boolean) Description() string { return k.description }
|
||||
|
||||
func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
w.Write(strconv.AppendBool(buf, k.From(l)))
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Boolean) Of(v bool) label.Label {
|
||||
if v {
|
||||
return label.Of64(k, 1)
|
||||
}
|
||||
return label.Of64(k, 0)
|
||||
}
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Boolean) Get(lm label.Map) bool {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
|
||||
|
||||
// Error represents a key
|
||||
type Error struct {
|
||||
name string
|
||||
description string
|
||||
}
|
||||
|
||||
// NewError creates a new Key for error values.
|
||||
func NewError(name, description string) *Error {
|
||||
return &Error{name: name, description: description}
|
||||
}
|
||||
|
||||
func (k *Error) Name() string { return k.name }
|
||||
func (k *Error) Description() string { return k.description }
|
||||
|
||||
func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
|
||||
io.WriteString(w, k.From(l).Error())
|
||||
}
|
||||
|
||||
// Of creates a new Label with this key and the supplied value.
|
||||
func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
|
||||
|
||||
// Get can be used to get a label for the key from a label.Map.
|
||||
func (k *Error) Get(lm label.Map) error {
|
||||
if t := lm.Find(k); t.Valid() {
|
||||
return k.From(t)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// From can be used to get a value from a Label.
|
||||
func (k *Error) From(t label.Label) error {
|
||||
err, _ := t.UnpackValue().(error)
|
||||
return err
|
||||
}
|
22
vendor/golang.org/x/tools/internal/event/keys/standard.go
generated
vendored
Normal file
22
vendor/golang.org/x/tools/internal/event/keys/standard.go
generated
vendored
Normal file
@@ -0,0 +1,22 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package keys

var (
	// Msg is a key used to add message strings to label lists.
	Msg = NewString("message", "a readable message")
	// Label is a key used to indicate an event adds labels to the context.
	Label = NewTag("label", "a label context marker")
	// Start is used for things like traces that have a name.
	Start = NewString("start", "span start")
	// End is a key used to mark the end of a span.
	End = NewTag("end", "a span end marker")
	// Detach is a key used to indicate an event detaches a span from its context.
	Detach = NewTag("detach", "a span detach marker")
	// Err is a key used to add error values to label lists.
	Err = NewError("error", "an error that occurred")
	// Metric is a key used to indicate an event records metrics.
	Metric = NewTag("metric", "a metric event marker")
)
@@ -2,9 +2,9 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["semver.go"],
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/semver",
|
||||
importpath = "golang.org/x/tools/internal/semver",
|
||||
srcs = ["label.go"],
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/event/label",
|
||||
importpath = "golang.org/x/tools/internal/event/label",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
)
|
||||
|
213
vendor/golang.org/x/tools/internal/event/label/label.go
generated
vendored
Normal file
213
vendor/golang.org/x/tools/internal/event/label/label.go
generated
vendored
Normal file
@@ -0,0 +1,213 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package label
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Key is used as the identity of a Label.
|
||||
// Keys are intended to be compared by pointer only; the name should be unique
// for communicating with external systems, but it is not required or enforced.
|
||||
type Key interface {
|
||||
// Name returns the key name.
|
||||
Name() string
|
||||
// Description returns a string that can be used to describe the value.
|
||||
Description() string
|
||||
|
||||
// Format is used in formatting to append the value of the label to the
|
||||
// supplied buffer.
|
||||
// The formatter may use the supplied buf as a scratch area to avoid
|
||||
// allocations.
|
||||
Format(w io.Writer, buf []byte, l Label)
|
||||
}
|
||||
|
||||
// Label holds a key and value pair.
|
||||
// It is normally used when passing around lists of labels.
|
||||
type Label struct {
|
||||
key Key
|
||||
packed uint64
|
||||
untyped interface{}
|
||||
}
|
||||
|
||||
// Map is the interface to a collection of Labels indexed by key.
|
||||
type Map interface {
|
||||
// Find returns the label that matches the supplied key.
|
||||
Find(key Key) Label
|
||||
}
|
||||
|
||||
// List is the interface to something that provides an iterable
|
||||
// list of labels.
|
||||
// Iteration should start from 0 and continue until Valid returns false.
|
||||
type List interface {
|
||||
// Valid returns true if the index is within range for the list.
|
||||
// It does not imply the label at that index will itself be valid.
|
||||
Valid(index int) bool
|
||||
// Label returns the label at the given index.
|
||||
Label(index int) Label
|
||||
}
|
||||
|
||||
// list implements LabelList for a list of Labels.
|
||||
type list struct {
|
||||
labels []Label
|
||||
}
|
||||
|
||||
// filter wraps a LabelList filtering out specific labels.
|
||||
type filter struct {
|
||||
keys []Key
|
||||
underlying List
|
||||
}
|
||||
|
||||
// listMap implements LabelMap for a simple list of labels.
|
||||
type listMap struct {
|
||||
labels []Label
|
||||
}
|
||||
|
||||
// mapChain implements LabelMap for a list of underlying LabelMap.
|
||||
type mapChain struct {
|
||||
maps []Map
|
||||
}
|
||||
|
||||
// OfValue creates a new label from the key and value.
|
||||
// This method is for implementing new key types, label creation should
|
||||
// normally be done with the Of method of the key.
|
||||
func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
|
||||
|
||||
// UnpackValue assumes the label was built using LabelOfValue and returns the value
|
||||
// that was passed to that constructor.
|
||||
// This method is for implementing new key types, for type safety normal
|
||||
// access should be done with the From method of the key.
|
||||
func (t Label) UnpackValue() interface{} { return t.untyped }
|
||||
|
||||
// Of64 creates a new label from a key and a uint64. This is often
|
||||
// used for non uint64 values that can be packed into a uint64.
|
||||
// This method is for implementing new key types, label creation should
|
||||
// normally be done with the Of method of the key.
|
||||
func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
|
||||
|
||||
// Unpack64 assumes the label was built using LabelOf64 and returns the value that
|
||||
// was passed to that constructor.
|
||||
// This method is for implementing new key types, for type safety normal
|
||||
// access should be done with the From method of the key.
|
||||
func (t Label) Unpack64() uint64 { return t.packed }
|
||||
|
||||
// OfString creates a new label from a key and a string.
|
||||
// This method is for implementing new key types, label creation should
|
||||
// normally be done with the Of method of the key.
|
||||
func OfString(k Key, v string) Label {
|
||||
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
|
||||
return Label{
|
||||
key: k,
|
||||
packed: uint64(hdr.Len),
|
||||
untyped: unsafe.Pointer(hdr.Data),
|
||||
}
|
||||
}
|
||||
|
||||
// UnpackString assumes the label was built using LabelOfString and returns the
|
||||
// value that was passed to that constructor.
|
||||
// This method is for implementing new key types, for type safety normal
|
||||
// access should be done with the From method of the key.
|
||||
func (t Label) UnpackString() string {
|
||||
var v string
|
||||
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
|
||||
hdr.Data = uintptr(t.untyped.(unsafe.Pointer))
|
||||
hdr.Len = int(t.packed)
|
||||
return *(*string)(unsafe.Pointer(hdr))
|
||||
}
|
||||
|
||||
// Valid returns true if the Label is a valid one (it has a key).
|
||||
func (t Label) Valid() bool { return t.key != nil }
|
||||
|
||||
// Key returns the key of this Label.
|
||||
func (t Label) Key() Key { return t.key }
|
||||
|
||||
// Format is used for debug printing of labels.
|
||||
func (t Label) Format(f fmt.State, r rune) {
|
||||
if !t.Valid() {
|
||||
io.WriteString(f, `nil`)
|
||||
return
|
||||
}
|
||||
io.WriteString(f, t.Key().Name())
|
||||
io.WriteString(f, "=")
|
||||
var buf [128]byte
|
||||
t.Key().Format(f, buf[:0], t)
|
||||
}
|
||||
|
||||
func (l *list) Valid(index int) bool {
|
||||
return index >= 0 && index < len(l.labels)
|
||||
}
|
||||
|
||||
func (l *list) Label(index int) Label {
|
||||
return l.labels[index]
|
||||
}
|
||||
|
||||
func (f *filter) Valid(index int) bool {
|
||||
return f.underlying.Valid(index)
|
||||
}
|
||||
|
||||
func (f *filter) Label(index int) Label {
|
||||
l := f.underlying.Label(index)
|
||||
for _, f := range f.keys {
|
||||
if l.Key() == f {
|
||||
return Label{}
|
||||
}
|
||||
}
|
||||
return l
|
||||
}
|
||||
|
||||
func (lm listMap) Find(key Key) Label {
|
||||
for _, l := range lm.labels {
|
||||
if l.Key() == key {
|
||||
return l
|
||||
}
|
||||
}
|
||||
return Label{}
|
||||
}
|
||||
|
||||
func (c mapChain) Find(key Key) Label {
|
||||
for _, src := range c.maps {
|
||||
l := src.Find(key)
|
||||
if l.Valid() {
|
||||
return l
|
||||
}
|
||||
}
|
||||
return Label{}
|
||||
}
|
||||
|
||||
var emptyList = &list{}
|
||||
|
||||
func NewList(labels ...Label) List {
|
||||
if len(labels) == 0 {
|
||||
return emptyList
|
||||
}
|
||||
return &list{labels: labels}
|
||||
}
|
||||
|
||||
func Filter(l List, keys ...Key) List {
|
||||
if len(keys) == 0 {
|
||||
return l
|
||||
}
|
||||
return &filter{keys: keys, underlying: l}
|
||||
}
|
||||
|
||||
func NewMap(labels ...Label) Map {
|
||||
return listMap{labels: labels}
|
||||
}
|
||||
|
||||
func MergeMaps(srcs ...Map) Map {
|
||||
var nonNil []Map
|
||||
for _, src := range srcs {
|
||||
if src != nil {
|
||||
nonNil = append(nonNil, src)
|
||||
}
|
||||
}
|
||||
if len(nonNil) == 1 {
|
||||
return nonNil[0]
|
||||
}
|
||||
return mapChain{maps: nonNil}
|
||||
}
|
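A sketch of how the Map helpers behave, in particular that MergeMaps resolves Find against the supplied maps in order, so earlier maps win. The keys are illustrative and the packages are internal to x/tools:

package main

import (
	"fmt"

	"golang.org/x/tools/internal/event/keys"
	"golang.org/x/tools/internal/event/label"
)

var (
	user = keys.NewString("user", "the requesting user")
	host = keys.NewString("host", "the worker host")
)

func main() {
	m1 := label.NewMap(user.Of("alice"))
	m2 := label.NewMap(user.Of("bob"), host.Of("worker-1"))

	// MergeMaps consults the maps in order, so the first valid match wins.
	merged := label.MergeMaps(m1, m2)
	fmt.Println(user.From(merged.Find(user))) // alice
	fmt.Println(host.From(merged.Find(host))) // worker-1
}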
10
vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
generated
vendored
10
vendor/golang.org/x/tools/internal/fastwalk/fastwalk.go
generated
vendored
@@ -14,14 +14,14 @@ import (
|
||||
"sync"
|
||||
)
|
||||
|
||||
// TraverseLink is used as a return value from WalkFuncs to indicate that the
|
||||
// ErrTraverseLink is used as a return value from WalkFuncs to indicate that the
|
||||
// symlink named in the call may be traversed.
|
||||
var TraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
|
||||
var ErrTraverseLink = errors.New("fastwalk: traverse symlink, assuming target is a directory")
|
||||
|
||||
// SkipFiles is used as a return value from WalkFuncs to indicate that the
|
||||
// ErrSkipFiles is used as a return value from WalkFuncs to indicate that the
|
||||
// callback should not be called for any other files in the current directory.
|
||||
// Child directories will still be traversed.
|
||||
var SkipFiles = errors.New("fastwalk: skip remaining files in directory")
|
||||
var ErrSkipFiles = errors.New("fastwalk: skip remaining files in directory")
|
||||
|
||||
// Walk is a faster implementation of filepath.Walk.
|
||||
//
|
||||
@@ -167,7 +167,7 @@ func (w *walker) onDirEnt(dirName, baseName string, typ os.FileMode) error {
|
||||
|
||||
err := w.fn(joined, typ)
|
||||
if typ == os.ModeSymlink {
|
||||
if err == TraverseLink {
|
||||
if err == ErrTraverseLink {
|
||||
// Set callbackDone so we don't call it twice for both the
|
||||
// symlink-as-symlink and the symlink-as-directory later:
|
||||
w.enqueue(walkItem{dir: joined, callbackDone: true})
|
||||
|
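With the exported sentinels renamed to ErrTraverseLink and ErrSkipFiles, a walker callback looks roughly like this. fastwalk is internal to x/tools, and the callback may be invoked concurrently from multiple goroutines:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"golang.org/x/tools/internal/fastwalk"
)

func main() {
	err := fastwalk.Walk(".", func(path string, typ os.FileMode) error {
		if typ == os.ModeSymlink {
			// Ask fastwalk to follow the symlink as if it were a directory.
			return fastwalk.ErrTraverseLink
		}
		if typ.IsDir() && filepath.Base(path) == ".git" {
			return filepath.SkipDir
		}
		if !typ.IsDir() {
			fmt.Println(path)
			// One file per directory is enough for this sketch: skip the
			// remaining files but keep descending into subdirectories.
			return fastwalk.ErrSkipFiles
		}
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, "walk failed:", err)
	}
}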
2
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
generated
vendored
2
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_portable.go
generated
vendored
@@ -26,7 +26,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
||||
continue
|
||||
}
|
||||
if err := fn(dirName, fi.Name(), fi.Mode()&os.ModeType); err != nil {
|
||||
if err == SkipFiles {
|
||||
if err == ErrSkipFiles {
|
||||
skipFiles = true
|
||||
continue
|
||||
}
|
||||
|
7
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
generated
vendored
7
vendor/golang.org/x/tools/internal/fastwalk/fastwalk_unix.go
generated
vendored
@@ -66,7 +66,7 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
||||
continue
|
||||
}
|
||||
if err := fn(dirName, name, typ); err != nil {
|
||||
if err == SkipFiles {
|
||||
if err == ErrSkipFiles {
|
||||
skipFiles = true
|
||||
continue
|
||||
}
|
||||
@@ -76,8 +76,9 @@ func readDir(dirName string, fn func(dirName, entName string, typ os.FileMode) e
|
||||
}
|
||||
|
||||
func parseDirEnt(buf []byte) (consumed int, name string, typ os.FileMode) {
|
||||
// golang.org/issue/15653
|
||||
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
|
||||
// golang.org/issue/37269
|
||||
dirent := &syscall.Dirent{}
|
||||
copy((*[unsafe.Sizeof(syscall.Dirent{})]byte)(unsafe.Pointer(dirent))[:], buf)
|
||||
if v := unsafe.Offsetof(dirent.Reclen) + unsafe.Sizeof(dirent.Reclen); uintptr(len(buf)) < v {
|
||||
panic(fmt.Sprintf("buf size of %d smaller than dirent header size %d", len(buf), v))
|
||||
}
|
||||
|
30
vendor/golang.org/x/tools/internal/gocommand/BUILD
generated
vendored
Normal file
30
vendor/golang.org/x/tools/internal/gocommand/BUILD
generated
vendored
Normal file
@@ -0,0 +1,30 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"invoke.go",
|
||||
"vendor.go",
|
||||
],
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/gocommand",
|
||||
importpath = "golang.org/x/tools/internal/gocommand",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
deps = [
|
||||
"//vendor/golang.org/x/mod/semver:go_default_library",
|
||||
"//vendor/golang.org/x/tools/internal/event:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
210
vendor/golang.org/x/tools/internal/gocommand/invoke.go
generated
vendored
Normal file
210
vendor/golang.org/x/tools/internal/gocommand/invoke.go
generated
vendored
Normal file
@@ -0,0 +1,210 @@
|
||||
// Copyright 2020 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gocommand is a helper for calling the go command.
|
||||
package gocommand
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"golang.org/x/tools/internal/event"
|
||||
)
|
||||
|
||||
// A Runner will run go command invocations and serialize
|
||||
// them if it sees a concurrency error.
|
||||
type Runner struct {
|
||||
// loadMu guards packages.Load calls and associated state.
|
||||
loadMu sync.Mutex
|
||||
// locked is true when we hold the mutex and have incremented.
|
||||
locked bool
|
||||
serializeLoads int
|
||||
}
|
||||
|
||||
// 1.13: go: updates to go.mod needed, but contents have changed
|
||||
// 1.14: go: updating go.mod: existing contents have changed since last read
|
||||
var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed`)
|
||||
|
||||
// Run is a convenience wrapper around RunRaw.
|
||||
// It returns only stdout and a "friendly" error.
|
||||
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
|
||||
stdout, _, friendly, _ := runner.runRaw(ctx, inv)
|
||||
return stdout, friendly
|
||||
}
|
||||
|
||||
// RunPiped runs the invocation serially, always waiting for any concurrent
|
||||
// invocations to complete first.
|
||||
func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) error {
|
||||
_, err := runner.runPiped(ctx, inv, stdout, stderr)
|
||||
return err
|
||||
}
|
||||
|
||||
// RunRaw runs the invocation, serializing requests only if they fight over
|
||||
// go.mod changes.
|
||||
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
|
||||
return runner.runRaw(ctx, inv)
|
||||
}
|
||||
|
||||
func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
|
||||
runner.loadMu.Lock()
|
||||
runner.serializeLoads++
|
||||
runner.locked = true
|
||||
|
||||
defer func() {
|
||||
runner.locked = false
|
||||
runner.serializeLoads--
|
||||
runner.loadMu.Unlock()
|
||||
}()
|
||||
|
||||
return inv.runWithFriendlyError(ctx, stdout, stderr)
|
||||
}
|
||||
|
||||
func (runner *Runner) runRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
|
||||
// We want to run invocations concurrently as much as possible. However,
|
||||
// if go.mod updates are needed, only one can make them and the others will
|
||||
// fail. We need to retry in those cases, but we don't want to thrash so
|
||||
// badly we never recover. To avoid that, once we've seen one concurrency
|
||||
// error, start serializing everything until the backlog has cleared out.
|
||||
runner.loadMu.Lock()
|
||||
if runner.serializeLoads == 0 {
|
||||
runner.loadMu.Unlock()
|
||||
} else {
|
||||
runner.locked = true
|
||||
runner.serializeLoads++
|
||||
}
|
||||
defer func() {
|
||||
if runner.locked {
|
||||
runner.locked = false
|
||||
runner.serializeLoads--
|
||||
runner.loadMu.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
|
||||
friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
|
||||
if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) {
|
||||
return stdout, stderr, friendlyErr, err
|
||||
}
|
||||
event.Error(ctx, "Load concurrency error, will retry serially", err)
|
||||
if !runner.locked {
|
||||
runner.loadMu.Lock()
|
||||
runner.serializeLoads++
|
||||
runner.locked = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// An Invocation represents a call to the go command.
|
||||
type Invocation struct {
|
||||
Verb string
|
||||
Args []string
|
||||
BuildFlags []string
|
||||
Env []string
|
||||
WorkingDir string
|
||||
Logf func(format string, args ...interface{})
|
||||
}
|
||||
|
||||
func (i *Invocation) runWithFriendlyError(ctx context.Context, stdout, stderr io.Writer) (friendlyError error, rawError error) {
|
||||
rawError = i.run(ctx, stdout, stderr)
|
||||
if rawError != nil {
|
||||
friendlyError = rawError
|
||||
// Check for 'go' executable not being found.
|
||||
if ee, ok := rawError.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
|
||||
friendlyError = fmt.Errorf("go command required, not found: %v", ee)
|
||||
}
|
||||
if ctx.Err() != nil {
|
||||
friendlyError = ctx.Err()
|
||||
}
|
||||
friendlyError = fmt.Errorf("err: %v: stderr: %s", friendlyError, stderr)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
|
||||
log := i.Logf
|
||||
if log == nil {
|
||||
log = func(string, ...interface{}) {}
|
||||
}
|
||||
|
||||
goArgs := []string{i.Verb}
|
||||
switch i.Verb {
|
||||
case "mod":
|
||||
// mod needs the sub-verb before build flags.
|
||||
goArgs = append(goArgs, i.Args[0])
|
||||
goArgs = append(goArgs, i.BuildFlags...)
|
||||
goArgs = append(goArgs, i.Args[1:]...)
|
||||
case "env":
|
||||
// env doesn't take build flags.
|
||||
goArgs = append(goArgs, i.Args...)
|
||||
default:
|
||||
goArgs = append(goArgs, i.BuildFlags...)
|
||||
goArgs = append(goArgs, i.Args...)
|
||||
}
|
||||
cmd := exec.Command("go", goArgs...)
|
||||
cmd.Stdout = stdout
|
||||
cmd.Stderr = stderr
|
||||
// On darwin the cwd gets resolved to the real path, which breaks anything that
|
||||
// expects the working directory to keep the original path, including the
|
||||
// go command when dealing with modules.
|
||||
// The Go stdlib has a special feature where if the cwd and the PWD are the
|
||||
// same node then it trusts the PWD, so by setting it in the env for the child
|
||||
// process we fix up all the paths returned by the go command.
|
||||
cmd.Env = append(os.Environ(), i.Env...)
|
||||
if i.WorkingDir != "" {
|
||||
cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
|
||||
cmd.Dir = i.WorkingDir
|
||||
}
|
||||
|
||||
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
|
||||
|
||||
return runCmdContext(ctx, cmd)
|
||||
}
|
||||
|
||||
// runCmdContext is like exec.CommandContext except it sends os.Interrupt
|
||||
// before os.Kill.
|
||||
func runCmdContext(ctx context.Context, cmd *exec.Cmd) error {
|
||||
if err := cmd.Start(); err != nil {
|
||||
return err
|
||||
}
|
||||
resChan := make(chan error, 1)
|
||||
go func() {
|
||||
resChan <- cmd.Wait()
|
||||
}()
|
||||
|
||||
select {
|
||||
case err := <-resChan:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
}
|
||||
// Cancelled. Interrupt and see if it ends voluntarily.
|
||||
cmd.Process.Signal(os.Interrupt)
|
||||
select {
|
||||
case err := <-resChan:
|
||||
return err
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
// Didn't shut down in response to interrupt. Kill it hard.
|
||||
cmd.Process.Kill()
|
||||
return <-resChan
|
||||
}
|
||||
|
||||
func cmdDebugStr(cmd *exec.Cmd) string {
|
||||
env := make(map[string]string)
|
||||
for _, kv := range cmd.Env {
|
||||
split := strings.Split(kv, "=")
|
||||
k, v := split[0], split[1]
|
||||
env[k] = v
|
||||
}
|
||||
|
||||
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
|
||||
}
|
102 vendor/golang.org/x/tools/internal/gocommand/vendor.go generated vendored Normal file
@@ -0,0 +1,102 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gocommand

import (
"bytes"
"context"
"fmt"
"os"
"path/filepath"
"regexp"
"strings"

"golang.org/x/mod/semver"
)

// ModuleJSON holds information about a module.
type ModuleJSON struct {
Path string // module path
Replace *ModuleJSON // replaced by this module
Main bool // is this the main module?
Indirect bool // is this module only an indirect dependency of main module?
Dir string // directory holding files for this module, if any
GoMod string // path to go.mod file for this module, if any
GoVersion string // go version used in module
}

var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)

// VendorEnabled reports whether vendoring is enabled. It takes a *Runner to execute Go commands
// with the supplied context.Context and Invocation. The Invocation can contain pre-defined fields,
// of which only Verb and Args are modified to run the appropriate Go command.
// Inspired by setDefaultBuildMod in modload/init.go
func VendorEnabled(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
mainMod, go114, err := getMainModuleAnd114(ctx, inv, r)
if err != nil {
return nil, false, err
}

// We check the GOFLAGS to see if there is anything overridden or not.
inv.Verb = "env"
inv.Args = []string{"GOFLAGS"}
stdout, err := r.Run(ctx, inv)
if err != nil {
return nil, false, err
}
goflags := string(bytes.TrimSpace(stdout.Bytes()))
matches := modFlagRegexp.FindStringSubmatch(goflags)
var modFlag string
if len(matches) != 0 {
modFlag = matches[1]
}
if modFlag != "" {
// Don't override an explicit '-mod=' argument.
return mainMod, modFlag == "vendor", nil
}
if mainMod == nil || !go114 {
return mainMod, false, nil
}
// Check 1.14's automatic vendor mode.
if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
// The Go version is at least 1.14, and a vendor directory exists.
// Set -mod=vendor by default.
return mainMod, true, nil
}
}
return mainMod, false, nil
}

// getMainModuleAnd114 gets the main module's information and whether the
// go command in use is 1.14+. This is the information needed to figure out
// if vendoring should be enabled.
func getMainModuleAnd114(ctx context.Context, inv Invocation, r *Runner) (*ModuleJSON, bool, error) {
const format = `{{.Path}}
{{.Dir}}
{{.GoMod}}
{{.GoVersion}}
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
`
inv.Verb = "list"
inv.Args = []string{"-m", "-f", format}
stdout, err := r.Run(ctx, inv)
if err != nil {
return nil, false, err
}

lines := strings.Split(stdout.String(), "\n")
if len(lines) < 5 {
return nil, false, fmt.Errorf("unexpected stdout: %q", stdout.String())
}
mod := &ModuleJSON{
Path: lines[0],
Dir: lines[1],
GoMod: lines[2],
GoVersion: lines[3],
Main: true,
}
return mod, lines[4] == "go1.14", nil
}
43 vendor/golang.org/x/tools/internal/gopathwalk/walk.go generated vendored
@@ -23,8 +23,10 @@ import (

// Options controls the behavior of a Walk call.
type Options struct {
Debug bool // Enable debug logging
ModulesEnabled bool // Search module caches. Also disables legacy goimports ignore rules.
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})
// Search module caches. Also disables legacy goimports ignore rules.
ModulesEnabled bool
}

// RootType indicates the type of a Root.
@@ -77,16 +79,17 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root
}
}

// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
if opts.Debug {
log.Printf("skipping nonexistent directory: %v", root.Path)
if opts.Logf != nil {
opts.Logf("skipping nonexistent directory: %v", root.Path)
}
return
}
start := time.Now()
if opts.Debug {
log.Printf("gopathwalk: scanning %s", root.Path)
if opts.Logf != nil {
opts.Logf("gopathwalk: scanning %s", root.Path)
}
w := &walker{
root: root,
@@ -99,8 +102,8 @@ func walkDir(root Root, add func(Root, string), skip func(root Root, dir string)
log.Printf("gopathwalk: scanning directory %v: %v", root.Path, err)
}

if opts.Debug {
log.Printf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
if opts.Logf != nil {
opts.Logf("gopathwalk: scanned %s in %v", root.Path, time.Since(start))
}
}

@@ -114,7 +117,7 @@ type walker struct {
ignoredDirs []os.FileInfo // The ignored directories, loaded from .goimportsignore files.
}

// init initializes the walker based on its Options.
// init initializes the walker based on its Options
func (w *walker) init() {
var ignoredPaths []string
if w.root.Type == RootModuleCache {
@@ -129,11 +132,11 @@ func (w *walker) init() {
full := filepath.Join(w.root.Path, p)
if fi, err := os.Stat(full); err == nil {
w.ignoredDirs = append(w.ignoredDirs, fi)
if w.opts.Debug {
log.Printf("Directory added to ignore list: %s", full)
if w.opts.Logf != nil {
w.opts.Logf("Directory added to ignore list: %s", full)
}
} else if w.opts.Debug {
log.Printf("Error statting ignored directory: %v", err)
} else if w.opts.Logf != nil {
w.opts.Logf("Error statting ignored directory: %v", err)
}
}
}
@@ -144,11 +147,11 @@ func (w *walker) init() {
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := ioutil.ReadFile(file)
if w.opts.Debug {
if w.opts.Logf != nil {
if err != nil {
log.Print(err)
w.opts.Logf("%v", err)
} else {
log.Printf("Read %s", file)
w.opts.Logf("Read %s", file)
}
}
if err != nil {
@@ -167,6 +170,7 @@ func (w *walker) getIgnoredDirs(path string) []string {
return ignoredDirs
}

// shouldSkipDir reports whether the file should be skipped or not.
func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
for _, ignoredDir := range w.ignoredDirs {
if os.SameFile(fi, ignoredDir) {
@@ -180,20 +184,21 @@ func (w *walker) shouldSkipDir(fi os.FileInfo, dir string) bool {
return false
}

// walk walks through the given path.
func (w *walker) walk(path string, typ os.FileMode) error {
dir := filepath.Dir(path)
if typ.IsRegular() {
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
return fastwalk.SkipFiles
return fastwalk.ErrSkipFiles
}
if !strings.HasSuffix(path, ".go") {
return nil
}

w.add(w.root, dir)
return fastwalk.SkipFiles
return fastwalk.ErrSkipFiles
}
if typ == os.ModeDir {
base := filepath.Base(path)
@@ -221,7 +226,7 @@ func (w *walker) walk(path string, typ os.FileMode) error {
return nil
}
if w.shouldTraverse(dir, fi) {
return fastwalk.TraverseLink
return fastwalk.ErrTraverseLink
}
}
return nil
5 vendor/golang.org/x/tools/internal/imports/BUILD generated vendored
@@ -14,11 +14,10 @@ go_library(
importpath = "golang.org/x/tools/internal/imports",
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
deps = [
"//vendor/golang.org/x/mod/module:go_default_library",
"//vendor/golang.org/x/tools/go/ast/astutil:go_default_library",
"//vendor/golang.org/x/tools/go/packages:go_default_library",
"//vendor/golang.org/x/tools/internal/gocommand:go_default_library",
"//vendor/golang.org/x/tools/internal/gopathwalk:go_default_library",
"//vendor/golang.org/x/tools/internal/module:go_default_library",
"//vendor/golang.org/x/tools/internal/semver:go_default_library",
],
)

673 vendor/golang.org/x/tools/internal/imports/fix.go generated vendored
@@ -14,7 +14,6 @@ import (
"go/token"
"io/ioutil"
"os"
"os/exec"
"path"
"path/filepath"
"reflect"
@@ -22,12 +21,11 @@ import (
"strconv"
"strings"
"sync"
"time"
"unicode"
"unicode/utf8"

"golang.org/x/tools/go/ast/astutil"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/gopathwalk"
)

@@ -52,7 +50,8 @@ var importToGroup = []func(env *ProcessEnv, importPath string) (num int, ok bool
return
},
func(_ *ProcessEnv, importPath string) (num int, ok bool) {
if strings.Contains(importPath, ".") {
firstComponent := strings.Split(importPath, "/")[0]
if strings.Contains(firstComponent, ".") {
return 1, true
}
return
@@ -82,7 +81,8 @@ type ImportFix struct {
// IdentName is the identifier that this fix will add or remove.
IdentName string
// FixType is the type of fix this is (AddImport, DeleteImport, SetImportName).
FixType ImportFixType
FixType ImportFixType
Relevance int // see pkg
}

// An ImportInfo represents a single import statement.
@@ -263,7 +263,7 @@ type pass struct {

// loadPackageNames saves the package names for everything referenced by imports.
func (p *pass) loadPackageNames(imports []*ImportInfo) error {
if p.env.Debug {
if p.env.Logf != nil {
p.env.Logf("loading package names for %v packages", len(imports))
defer func() {
p.env.Logf("done loading package names for %v packages", len(imports))
@@ -302,7 +302,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
if known != nil && known.name != "" {
return known.name
}
return importPathToAssumedName(imp.ImportPath)
return ImportPathToAssumedName(imp.ImportPath)
}

// load reads in everything necessary to run a pass, and reports whether the
@@ -335,7 +335,7 @@ func (p *pass) load() ([]*ImportFix, bool) {
if p.loadRealPackageNames {
err := p.loadPackageNames(append(imports, p.candidates...))
if err != nil {
if p.env.Debug {
if p.env.Logf != nil {
p.env.Logf("loading package names: %v", err)
}
return nil, false
@@ -435,7 +435,7 @@ func (p *pass) importSpecName(imp *ImportInfo) string {
}

ident := p.importIdentifier(imp)
if ident == importPathToAssumedName(imp.ImportPath) {
if ident == ImportPathToAssumedName(imp.ImportPath) {
return "" // ident not needed since the assumed and real names are the same.
}
return ident
@@ -529,7 +529,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
return nil, err
}
srcDir := filepath.Dir(abs)
if env.Debug {
if env.Logf != nil {
env.Logf("fixImports(filename=%q), abs=%q, srcDir=%q ...", filename, abs, srcDir)
}

@@ -537,7 +537,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
// derive package names from import paths, see if the file is already
// complete. We can't add any imports yet, because we don't know
// if missing references are actually package vars.
p := &pass{fset: fset, f: f, srcDir: srcDir}
p := &pass{fset: fset, f: f, srcDir: srcDir, env: env}
if fixes, done := p.load(); done {
return fixes, nil
}
@@ -559,8 +559,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
}

// Third pass: get real package names where we had previously used
// the naive algorithm. This is the first step that will use the
// environment, so we provide it here for the first time.
// the naive algorithm.
p = &pass{fset: fset, f: f, srcDir: srcDir, env: env}
p.loadRealPackageNames = true
p.otherFiles = otherFiles
@@ -585,89 +584,127 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
return fixes, nil
}

// getCandidatePkgs returns the list of pkgs that are accessible from filename,
// optionall filtered to only packages named pkgName.
func getCandidatePkgs(pkgName, filename string, env *ProcessEnv) ([]*pkg, error) {
// TODO(heschi): filter out current package. (Don't forget x_test can import x.)
// Highest relevance, used for the standard library. Chosen arbitrarily to
// match pre-existing gopls code.
const MaxRelevance = 7

var result []*pkg
// getCandidatePkgs works with the passed callback to find all acceptable packages.
// It deduplicates by import path, and uses a cached stdlib rather than reading
// from disk.
func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filename, filePkg string, env *ProcessEnv) error {
notSelf := func(p *pkg) bool {
return p.packageName != filePkg || p.dir != filepath.Dir(filename)
}
// Start off with the standard library.
for importPath := range stdlib {
if pkgName != "" && path.Base(importPath) != pkgName {
continue
}
result = append(result, &pkg{
for importPath, exports := range stdlib {
p := &pkg{
dir: filepath.Join(env.GOROOT, "src", importPath),
importPathShort: importPath,
packageName: path.Base(importPath),
relevance: 0,
})
}

// Exclude goroot results -- getting them is relatively expensive, not cached,
// and generally redundant with the in-memory version.
exclude := []gopathwalk.RootType{gopathwalk.RootGOROOT}
// Only the go/packages resolver uses the first argument, and nobody uses that resolver.
scannedPkgs, err := env.GetResolver().scan(nil, true, exclude)
if err != nil {
return nil, err
relevance: MaxRelevance,
}
if notSelf(p) && wrappedCallback.packageNameLoaded(p) {
wrappedCallback.exportsLoaded(p, exports)
}
}

var mu sync.Mutex
dupCheck := map[string]struct{}{}
for _, pkg := range scannedPkgs {
if pkgName != "" && pkg.packageName != pkgName {
continue
}
if !canUse(filename, pkg.dir) {
continue
}
if _, ok := dupCheck[pkg.importPathShort]; ok {
continue
}
dupCheck[pkg.importPathShort] = struct{}{}
result = append(result, pkg)

scanFilter := &scanCallback{
rootFound: func(root gopathwalk.Root) bool {
// Exclude goroot results -- getting them is relatively expensive, not cached,
// and generally redundant with the in-memory version.
return root.Type != gopathwalk.RootGOROOT && wrappedCallback.rootFound(root)
},
dirFound: wrappedCallback.dirFound,
packageNameLoaded: func(pkg *pkg) bool {
mu.Lock()
defer mu.Unlock()
if _, ok := dupCheck[pkg.importPathShort]; ok {
return false
}
dupCheck[pkg.importPathShort] = struct{}{}
return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg)
},
exportsLoaded: func(pkg *pkg, exports []string) {
// If we're an x_test, load the package under test's test variant.
if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) {
var err error
_, exports, err = loadExportsFromFiles(ctx, env, pkg.dir, true)
if err != nil {
return
}
}
wrappedCallback.exportsLoaded(pkg, exports)
},
}
return env.GetResolver().scan(ctx, scanFilter)
}

// Sort first by relevance, then by package name, with import path as a tiebreaker.
sort.Slice(result, func(i, j int) bool {
pi, pj := result[i], result[j]
if pi.relevance != pj.relevance {
return pi.relevance < pj.relevance
}
if pi.packageName != pj.packageName {
return pi.packageName < pj.packageName
}
return pi.importPathShort < pj.importPathShort
})
func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) map[string]int {
result := make(map[string]int)
for _, path := range paths {
result[path] = env.GetResolver().scoreImportPath(ctx, path)
}
return result
}

return result, nil
func PrimeCache(ctx context.Context, env *ProcessEnv) error {
// Fully scan the disk for directories, but don't actually read any Go files.
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
},
dirFound: func(pkg *pkg) bool {
return false
},
packageNameLoaded: func(pkg *pkg) bool {
return false
},
}
return getCandidatePkgs(ctx, callback, "", "", env)
}

func candidateImportName(pkg *pkg) string {
if importPathToAssumedName(pkg.importPathShort) != pkg.packageName {
if ImportPathToAssumedName(pkg.importPathShort) != pkg.packageName {
return pkg.packageName
}
return ""
}

// getAllCandidates gets all of the candidates to be imported, regardless of if they are needed.
func getAllCandidates(filename string, env *ProcessEnv) ([]ImportFix, error) {
pkgs, err := getCandidatePkgs("", filename, env)
if err != nil {
return nil, err
func getAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix, filename, filePkg string, env *ProcessEnv) error {
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
},
dirFound: func(pkg *pkg) bool {
if !canUse(filename, pkg.dir) {
return false
}
// Try the assumed package name first, then a simpler path match
// in case of packages named vN, which are not uncommon.
return strings.HasPrefix(ImportPathToAssumedName(pkg.importPathShort), searchPrefix) ||
strings.HasPrefix(path.Base(pkg.importPathShort), searchPrefix)
},
packageNameLoaded: func(pkg *pkg) bool {
if !strings.HasPrefix(pkg.packageName, searchPrefix) {
return false
}
wrapped(ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
Relevance: pkg.relevance,
})
return false
},
}
result := make([]ImportFix, 0, len(pkgs))
for _, pkg := range pkgs {
result = append(result, ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
})
}
return result, nil
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
}

// A PackageExport is a package and its exports.
@@ -676,64 +713,63 @@ type PackageExport struct {
Exports []string
}

func getPackageExports(completePackage, filename string, env *ProcessEnv) ([]PackageExport, error) {
pkgs, err := getCandidatePkgs(completePackage, filename, env)
if err != nil {
return nil, err
func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchPkg, filename, filePkg string, env *ProcessEnv) error {
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, references{searchPkg: nil}, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
return pkg.packageName == searchPkg
},
exportsLoaded: func(pkg *pkg, exports []string) {
sort.Strings(exports)
wrapped(PackageExport{
Fix: &ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
Relevance: pkg.relevance,
},
Exports: exports,
})
},
}

results := make([]PackageExport, 0, len(pkgs))
for _, pkg := range pkgs {
fix := &ImportFix{
StmtInfo: ImportInfo{
ImportPath: pkg.importPathShort,
Name: candidateImportName(pkg),
},
IdentName: pkg.packageName,
FixType: AddImport,
}
var exports []string
if e, ok := stdlib[pkg.importPathShort]; ok {
exports = e
} else {
exports, err = loadExportsForPackage(context.Background(), env, completePackage, pkg)
if err != nil {
if env.Debug {
env.Logf("while completing %q, error loading exports from %q: %v", completePackage, pkg.importPathShort, err)
}
continue
}
}
sort.Strings(exports)
results = append(results, PackageExport{
Fix: fix,
Exports: exports,
})
}

return results, nil
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
}

// ProcessEnv contains environment variables and settings that affect the use of
// the go command, the go/build package, etc.
type ProcessEnv struct {
LocalPrefix string
Debug bool

GocmdRunner *gocommand.Runner

BuildFlags []string

// If non-empty, these will be used instead of the
// process-wide values.
GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string
WorkingDir string

// If true, use go/packages regardless of the environment.
ForceGoPackages bool

// Logf is the default logger for the ProcessEnv.
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})

resolver Resolver
}

// CopyConfig copies the env's configuration into a new env.
func (e *ProcessEnv) CopyConfig() *ProcessEnv {
copy := *e
copy.resolver = nil
return &copy
}

func (e *ProcessEnv) env() []string {
env := os.Environ()
add := func(k, v string) {
@@ -757,73 +793,55 @@ func (e *ProcessEnv) GetResolver() Resolver {
if e.resolver != nil {
return e.resolver
}
if e.ForceGoPackages {
e.resolver = &goPackagesResolver{env: e}
return e.resolver
}

out, err := e.invokeGo("env", "GOMOD")
out, err := e.invokeGo(context.TODO(), "env", "GOMOD")
if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
e.resolver = &gopathResolver{env: e}
e.resolver = newGopathResolver(e)
return e.resolver
}
e.resolver = &ModuleResolver{env: e}
e.resolver = newModuleResolver(e)
return e.resolver
}

func (e *ProcessEnv) newPackagesConfig(mode packages.LoadMode) *packages.Config {
return &packages.Config{
Mode: mode,
Dir: e.WorkingDir,
Env: e.env(),
}
}

func (e *ProcessEnv) buildContext() *build.Context {
ctx := build.Default
ctx.GOROOT = e.GOROOT
ctx.GOPATH = e.GOPATH

// As of Go 1.14, build.Context has a WorkingDir field
// As of Go 1.14, build.Context has a Dir field
// (see golang.org/issue/34860).
// Populate it only if present.
if wd := reflect.ValueOf(&ctx).Elem().FieldByName("WorkingDir"); wd.IsValid() && wd.Kind() == reflect.String {
wd.SetString(e.WorkingDir)
rc := reflect.ValueOf(&ctx).Elem()
dir := rc.FieldByName("Dir")
if !dir.IsValid() {
// Working drafts of Go 1.14 named the field "WorkingDir" instead.
// TODO(bcmills): Remove this case after the Go 1.14 beta has been released.
dir = rc.FieldByName("WorkingDir")
}
if dir.IsValid() && dir.Kind() == reflect.String {
dir.SetString(e.WorkingDir)
}

return &ctx
}

func (e *ProcessEnv) invokeGo(args ...string) (*bytes.Buffer, error) {
cmd := exec.Command("go", args...)
stdout := &bytes.Buffer{}
stderr := &bytes.Buffer{}
cmd.Stdout = stdout
cmd.Stderr = stderr
cmd.Env = e.env()
cmd.Dir = e.WorkingDir

if e.Debug {
defer func(start time.Time) { e.Logf("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string) (*bytes.Buffer, error) {
inv := gocommand.Invocation{
Verb: verb,
Args: args,
BuildFlags: e.BuildFlags,
Env: e.env(),
Logf: e.Logf,
WorkingDir: e.WorkingDir,
}
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("running go: %v (stderr:\n%s)", err, stderr)
}
return stdout, nil
}

func cmdDebugStr(cmd *exec.Cmd) string {
env := make(map[string]string)
for _, kv := range cmd.Env {
split := strings.Split(kv, "=")
k, v := split[0], split[1]
env[k] = v
}

return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v GOPROXY=%v PWD=%v go %v", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["GOPROXY"], env["PWD"], cmd.Args)
return e.GocmdRunner.Run(ctx, inv)
}

func addStdlibCandidates(pass *pass, refs references) {
add := func(pkg string) {
// Prevent self-imports.
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir {
return
}
exports := copyExports(stdlib[pkg])
pass.addCandidate(
&ImportInfo{ImportPath: pkg},
@@ -848,94 +866,65 @@ func addStdlibCandidates(pass *pass, refs references) {
type Resolver interface {
// loadPackageNames loads the package names in importPaths.
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
// scan finds (at least) the packages satisfying refs. If loadNames is true,
// package names will be set on the results, and dirs whose package name
// could not be determined will be excluded.
scan(refs references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error)
// scan works with callback to search for packages. See scanCallback for details.
scan(ctx context.Context, callback *scanCallback) error
// loadExports returns the set of exported symbols in the package at dir.
// loadExports may be called concurrently.
loadExports(ctx context.Context, pkg *pkg) (string, []string, error)
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error)
// scoreImportPath returns the relevance for an import path.
scoreImportPath(ctx context.Context, path string) int

ClearForNewScan()
}

// gopackagesResolver implements resolver for GOPATH and module workspaces using go/packages.
type goPackagesResolver struct {
env *ProcessEnv
}

func (r *goPackagesResolver) ClearForNewScan() {}

func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
if len(importPaths) == 0 {
return nil, nil
}
cfg := r.env.newPackagesConfig(packages.LoadFiles)
pkgs, err := packages.Load(cfg, importPaths...)
if err != nil {
return nil, err
}
names := map[string]string{}
for _, pkg := range pkgs {
names[VendorlessPath(pkg.PkgPath)] = pkg.Name
}
// We may not have found all the packages. Guess the rest.
for _, path := range importPaths {
if _, ok := names[path]; ok {
continue
}
names[path] = importPathToAssumedName(path)
}
return names, nil

}

func (r *goPackagesResolver) scan(refs references, _ bool, _ []gopathwalk.RootType) ([]*pkg, error) {
var loadQueries []string
for pkgName := range refs {
loadQueries = append(loadQueries, "iamashamedtousethedisabledqueryname="+pkgName)
}
sort.Strings(loadQueries)
cfg := r.env.newPackagesConfig(packages.LoadFiles)
goPackages, err := packages.Load(cfg, loadQueries...)
if err != nil {
return nil, err
}

var scan []*pkg
for _, goPackage := range goPackages {
scan = append(scan, &pkg{
dir: filepath.Dir(goPackage.CompiledGoFiles[0]),
importPathShort: VendorlessPath(goPackage.PkgPath),
goPackage: goPackage,
packageName: goPackage.Name,
})
}
return scan, nil
}

func (r *goPackagesResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
if pkg.goPackage == nil {
return "", nil, fmt.Errorf("goPackage not set")
}
var exports []string
fset := token.NewFileSet()
for _, fname := range pkg.goPackage.CompiledGoFiles {
f, err := parser.ParseFile(fset, fname, nil, 0)
if err != nil {
return "", nil, fmt.Errorf("parsing %s: %v", fname, err)
}
for name := range f.Scope.Objects {
if ast.IsExported(name) {
exports = append(exports, name)
}
}
}
return pkg.goPackage.Name, exports, nil
// A scanCallback controls a call to scan and receives its results.
// In general, minor errors will be silently discarded; a user should not
// expect to receive a full series of calls for everything.
type scanCallback struct {
// rootFound is called before scanning a new root dir. If it returns true,
// the root will be scanned. Returning false will not necessarily prevent
// directories from that root making it to dirFound.
rootFound func(gopathwalk.Root) bool
// dirFound is called when a directory is found that is possibly a Go package.
// pkg will be populated with everything except packageName.
// If it returns true, the package's name will be loaded.
dirFound func(pkg *pkg) bool
// packageNameLoaded is called when a package is found and its name is loaded.
// If it returns true, the package's exports will be loaded.
packageNameLoaded func(pkg *pkg) bool
// exportsLoaded is called when a package's exports have been loaded.
exportsLoaded func(pkg *pkg, exports []string)
}

func addExternalCandidates(pass *pass, refs references, filename string) error {
dirScan, err := pass.env.GetResolver().scan(refs, false, nil)
var mu sync.Mutex
found := make(map[string][]pkgDistance)
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true // We want everything.
},
dirFound: func(pkg *pkg) bool {
return pkgIsCandidate(filename, refs, pkg)
},
packageNameLoaded: func(pkg *pkg) bool {
if _, want := refs[pkg.packageName]; !want {
return false
}
if pkg.dir == pass.srcDir && pass.f.Name.Name == pkg.packageName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
return false
}
if !canUse(filename, pkg.dir) {
return false
}
mu.Lock()
defer mu.Unlock()
found[pkg.packageName] = append(found[pkg.packageName], pkgDistance{pkg, distance(pass.srcDir, pkg.dir)})
return false // We'll do our own loading after we sort.
},
}
err := pass.env.GetResolver().scan(context.Background(), callback)
if err != nil {
return err
}
@@ -962,7 +951,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error {
go func(pkgName string, symbols map[string]bool) {
defer wg.Done()

found, err := findImport(ctx, pass, dirScan, pkgName, symbols, filename)
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename)

if err != nil {
firstErrOnce.Do(func() {
@@ -1006,7 +995,7 @@ func notIdentifier(ch rune) bool {
ch >= utf8.RuneSelf && (unicode.IsLetter(ch) || unicode.IsDigit(ch)))
}

// importPathToAssumedName returns the assumed package name of an import path.
// ImportPathToAssumedName returns the assumed package name of an import path.
// It does this using only string parsing of the import path.
// It picks the last element of the path that does not look like a major
// version, and then picks the valid identifier off the start of that element.
@@ -1014,7 +1003,7 @@ func notIdentifier(ch rune) bool {
// clarity.
// This function could be moved to a standard package and exported if we want
// for use in other tools.
func importPathToAssumedName(importPath string) string {
func ImportPathToAssumedName(importPath string) string {
base := path.Base(importPath)
if strings.HasPrefix(base, "v") {
if _, err := strconv.Atoi(base[1:]); err == nil {
@@ -1033,24 +1022,36 @@ func importPathToAssumedName(importPath string) string {

// gopathResolver implements resolver for GOPATH workspaces.
type gopathResolver struct {
env *ProcessEnv
cache *dirInfoCache
env *ProcessEnv
walked bool
cache *dirInfoCache
scanSema chan struct{} // scanSema prevents concurrent scans.
}

func (r *gopathResolver) init() {
if r.cache == nil {
r.cache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
}
func newGopathResolver(env *ProcessEnv) *gopathResolver {
r := &gopathResolver{
env: env,
cache: &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
},
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{}
return r
}

func (r *gopathResolver) ClearForNewScan() {
r.cache = nil
<-r.scanSema
r.cache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
r.walked = false
r.scanSema <- struct{}{}
}

func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
r.init()
names := map[string]string{}
for _, path := range importPaths {
names[path] = importPathToName(r.env, path, srcDir)
@@ -1130,7 +1131,6 @@ func packageDirToName(dir string) (packageName string, err error) {
}

type pkg struct {
goPackage *packages.Package
dir string // absolute file path to pkg directory ("/usr/lib/go/src/net/http")
importPathShort string // vendorless import path ("net/http", "a/b")
packageName string // package name loaded from source if requested
@@ -1178,8 +1178,7 @@ func distance(basepath, targetpath string) int {
return strings.Count(p, string(filepath.Separator)) + 1
}

func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) {
r.init()
func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error {
add := func(root gopathwalk.Root, dir string) {
// We assume cached directories have not changed. We can skip them and their
// children.
@@ -1196,56 +1195,84 @@ func (r *gopathResolver) scan(_ references, loadNames bool, exclude []gopathwalk
}
r.cache.Store(dir, info)
}
roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), exclude)
gopathwalk.Walk(roots, add, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: false})
var result []*pkg
for _, dir := range r.cache.Keys() {
info, ok := r.cache.Load(dir)
if !ok {
continue
}
if loadNames {
var err error
info, err = r.cache.CachePackageName(info)
if err != nil {
continue
}
processDir := func(info directoryPackageInfo) {
// Skip this directory if we were not able to get the package information successfully.
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
return
}

p := &pkg{
importPathShort: info.nonCanonicalImportPath,
dir: dir,
relevance: 1,
packageName: info.packageName,
dir: info.dir,
relevance: MaxRelevance - 1,
}
if info.rootType == gopathwalk.RootGOROOT {
p.relevance = 0
p.relevance = MaxRelevance
}

if !callback.dirFound(p) {
return
}
var err error
p.packageName, err = r.cache.CachePackageName(info)
if err != nil {
return
}

if !callback.packageNameLoaded(p) {
return
}
if _, exports, err := r.loadExports(ctx, p, false); err == nil {
callback.exportsLoaded(p, exports)
}
result = append(result, p)
}
return result, nil
stop := r.cache.ScanAndListen(ctx, processDir)
defer stop()
// The callback is not necessarily safe to use in the goroutine below. Process roots eagerly.
roots := filterRoots(gopathwalk.SrcDirsRoots(r.env.buildContext()), callback.rootFound)
// We can't cancel walks, because we need them to finish to have a usable
// cache. Instead, run them in a separate goroutine and detach.
scanDone := make(chan struct{})
go func() {
select {
case <-ctx.Done():
return
case <-r.scanSema:
}
defer func() { r.scanSema <- struct{}{} }()
gopathwalk.Walk(roots, add, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: false})
close(scanDone)
}()
select {
case <-ctx.Done():
case <-scanDone:
}
return nil
}

func filterRoots(roots []gopathwalk.Root, exclude []gopathwalk.RootType) []gopathwalk.Root {
func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) int {
if _, ok := stdlib[path]; ok {
return MaxRelevance
}
return MaxRelevance - 1
}

func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []gopathwalk.Root {
var result []gopathwalk.Root
outer:
for _, root := range roots {
for _, i := range exclude {
if i == root.Type {
continue outer
}
if !include(root) {
continue
}
result = append(result, root)
}
return result
}

func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
r.init()
if info, ok := r.cache.Load(pkg.dir); ok {
func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
if info, ok := r.cache.Load(pkg.dir); ok && !includeTest {
return r.cache.CacheExports(ctx, r.env, info)
}
return loadExportsFromFiles(ctx, r.env, pkg.dir)
return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
}

// VendorlessPath returns the devendorized version of the import path ipath.
@@ -1261,7 +1288,7 @@ func VendorlessPath(ipath string) string {
return ipath
}

func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (string, []string, error) {
func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) {
var exports []string

// Look for non-test, buildable .go files which could provide exports.
@@ -1272,7 +1299,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
var files []os.FileInfo
for _, fi := range all {
name := fi.Name()
if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") {
if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) {
continue
}
match, err := env.buildContext().MatchFile(dir, fi.Name())
@@ -1298,13 +1325,20 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
fullFile := filepath.Join(dir, fi.Name())
f, err := parser.ParseFile(fset, fullFile, nil, 0)
if err != nil {
return "", nil, fmt.Errorf("parsing %s: %v", fullFile, err)
if env.Logf != nil {
env.Logf("error parsing %v: %v", fullFile, err)
}
continue
}
if f.Name.Name == "documentation" {
// Special case from go/build.ImportDir, not
// handled by MatchFile above.
continue
}
if includeTest && strings.HasSuffix(f.Name.Name, "_test") {
// x_test package. We want internal test files only.
continue
}
pkgName = f.Name.Name
for name := range f.Scope.Objects {
if ast.IsExported(name) {
@@ -1313,7 +1347,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str
}
}

if env.Debug {
if env.Logf != nil {
sortedExports := append([]string(nil), exports...)
sort.Strings(sortedExports)
env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", "))
@@ -1323,42 +1357,19 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string) (str

// findImport searches for a package with the given symbols.
// If no package is found, findImport returns ("", false, nil)
func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
pkgDir, err := filepath.Abs(filename)
if err != nil {
return nil, err
}
pkgDir = filepath.Dir(pkgDir)

// Find candidate packages, looking only at their directory names first.
var candidates []pkgDistance
for _, pkg := range dirScan {
if pkg.dir == pkgDir && pass.f.Name.Name == pkgName {
// The candidate is in the same directory and has the
// same package name. Don't try to import ourselves.
continue
}
if pkgIsCandidate(filename, pkgName, pkg) {
candidates = append(candidates, pkgDistance{
pkg: pkg,
distance: distance(pkgDir, pkg.dir),
})
}
}

func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
// Sort the candidates by their import package length,
// assuming that shorter package names are better than long
// ones. Note that this sorts by the de-vendored name, so
// there's no "penalty" for vendoring.
sort.Sort(byDistanceOrImportPathShortLength(candidates))
if pass.env.Debug {
if pass.env.Logf != nil {
for i, c := range candidates {
pass.env.Logf("%s candidate %d/%d: %v in %v", pkgName, i+1, len(candidates), c.pkg.importPathShort, c.pkg.dir)
}
}

// Collect exports for packages with matching names.

rescv := make([]chan *pkg, len(candidates))
for i := range candidates {
rescv[i] = make(chan *pkg, 1)
@@ -1390,12 +1401,14 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string,
wg.Done()
}()

if pass.env.Debug {
if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s)", c.pkg.dir, pkgName)
}
exports, err := loadExportsForPackage(ctx, pass.env, pkgName, c.pkg)
// If we're an x_test, load the package under test's test variant.
includeTest := strings.HasSuffix(pass.f.Name.Name, "_test") && c.pkg.dir == pass.srcDir
_, exports, err := pass.env.GetResolver().loadExports(ctx, c.pkg, includeTest)
if err != nil {
if pass.env.Debug {
if pass.env.Logf != nil {
pass.env.Logf("loading exports in dir %s (seeking package %s): %v", c.pkg.dir, pkgName, err)
}
resc <- nil
@@ -1430,17 +1443,6 @@ func findImport(ctx context.Context, pass *pass, dirScan []*pkg, pkgName string,
return nil, nil
}

func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg string, pkg *pkg) ([]string, error) {
pkgName, exports, err := env.GetResolver().loadExports(ctx, pkg)
if err != nil {
return nil, err
}
if expectPkg != pkgName {
return nil, fmt.Errorf("dir %v is package %v, wanted %v", pkg.dir, pkgName, expectPkg)
}
return exports, err
}

// pkgIsCandidate reports whether pkg is a candidate for satisfying the
// finding which package pkgIdent in the file named by filename is trying
// to refer to.
@@ -1453,7 +1455,7 @@ func loadExportsForPackage(ctx context.Context, env *ProcessEnv, expectPkg strin
// filename is the file being formatted.
// pkgIdent is the package being searched for, like "client" (if
// searching for "client.New")
func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool {
func pkgIsCandidate(filename string, refs references, pkg *pkg) bool {
// Check "internal" and "vendor" visibility:
if !canUse(filename, pkg.dir) {
return false
@@ -1471,17 +1473,18 @@ func pkgIsCandidate(filename, pkgIdent string, pkg *pkg) bool {
// "bar", which is strongly discouraged
// anyway. There's no reason goimports needs
// to be slow just to accommodate that.
lastTwo := lastTwoComponents(pkg.importPathShort)
if strings.Contains(lastTwo, pkgIdent) {
return true
}
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
for pkgIdent := range refs {
lastTwo := lastTwoComponents(pkg.importPathShort)
if strings.Contains(lastTwo, pkgIdent) {
return true
}
if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(pkgIdent) {
lastTwo = lowerASCIIAndRemoveHyphen(lastTwo)
if strings.Contains(lastTwo, pkgIdent) {
return true
}
}
}

return false
}

62
vendor/golang.org/x/tools/internal/imports/imports.go
generated
vendored
62
vendor/golang.org/x/tools/internal/imports/imports.go
generated
vendored
@@ -11,6 +11,7 @@ package imports
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
@@ -20,12 +21,13 @@ import (
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
)
|
||||
|
||||
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
|
||||
@@ -83,42 +85,54 @@ func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix,
|
||||
return getFixes(fileSet, file, filename, opt.Env)
|
||||
}
|
||||
|
||||
// ApplyFix will apply all of the fixes to the file and format it.
|
||||
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) {
|
||||
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
|
||||
// is added in when parsing the file.
|
||||
func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, extraMode parser.Mode) (formatted []byte, err error) {
|
||||
src, opt, err = initialize(filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Don't use parse() -- we don't care about fragments or statement lists
|
||||
// here, and we need to work with unparseable files.
|
||||
fileSet := token.NewFileSet()
|
||||
file, adjust, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
parserMode := parser.Mode(0)
|
||||
if opt.Comments {
|
||||
parserMode |= parser.ParseComments
|
||||
}
|
||||
if opt.AllErrors {
|
||||
parserMode |= parser.AllErrors
|
||||
}
|
||||
parserMode |= extraMode
|
||||
|
||||
file, err := parser.ParseFile(fileSet, filename, src, parserMode)
|
||||
if file == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Apply the fixes to the file.
|
||||
apply(fileSet, file, fixes)
|
||||
|
||||
return formatFile(fileSet, file, src, adjust, opt)
|
||||
return formatFile(fileSet, file, src, nil, opt)
|
||||
}
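
Note on the hunk above: ApplyFixes now builds its parser mode inline from the options plus the new extraMode argument instead of going through parse(). The flag composition itself is plain go/parser bit OR-ing; here is a stand-alone sketch using hypothetical option values in place of opt.Comments and opt.AllErrors.

package main

import (
	"fmt"
	"go/parser"
	"go/token"
)

func main() {
	// Hypothetical option values standing in for opt.Comments / opt.AllErrors.
	comments, allErrors := true, false
	extraMode := parser.ImportsOnly // caller-supplied extra flags

	mode := parser.Mode(0)
	if comments {
		mode |= parser.ParseComments
	}
	if allErrors {
		mode |= parser.AllErrors
	}
	mode |= extraMode

	src := []byte("package demo\n\nimport \"fmt\"\n\nfunc f() { fmt.Println() }\n")
	fset := token.NewFileSet()
	// As in the diff, a non-nil *ast.File is usable even if err is non-nil.
	file, err := parser.ParseFile(fset, "demo.go", src, mode)
	fmt.Println(file != nil, err)
}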
|
||||
|
||||
// GetAllCandidates gets all of the standard library candidate packages to import in
|
||||
// sorted order on import path.
|
||||
func GetAllCandidates(filename string, opt *Options) (pkgs []ImportFix, err error) {
|
||||
_, opt, err = initialize(filename, nil, opt)
|
||||
// GetAllCandidates gets all of the packages starting with prefix that can be
|
||||
// imported by filename, sorted by import path.
|
||||
func GetAllCandidates(ctx context.Context, callback func(ImportFix), searchPrefix, filename, filePkg string, opt *Options) error {
|
||||
_, opt, err := initialize(filename, []byte{}, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
return getAllCandidates(filename, opt.Env)
|
||||
return getAllCandidates(ctx, callback, searchPrefix, filename, filePkg, opt.Env)
|
||||
}
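
Note: GetAllCandidates (and GetPackageExports below) switch from returning a slice to streaming results through a callback. A caller that still wants a slice simply collects inside the callback. The sketch below uses a made-up Fix type and producer, since the real ImportFix lives in an internal package and cannot be imported here.

package main

import "fmt"

// Fix is a stand-in for imports.ImportFix, which is not importable outside x/tools.
type Fix struct{ Path string }

// stream is a stand-in producer with the callback-style signature used by the
// new GetAllCandidates/GetPackageExports APIs.
func stream(callback func(Fix)) error {
	for _, p := range []string{"bytes", "bufio", "net/http"} {
		callback(Fix{Path: p})
	}
	return nil
}

func main() {
	var fixes []Fix
	if err := stream(func(f Fix) { fixes = append(fixes, f) }); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(fixes)
}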
|
||||
|
||||
// GetPackageExports returns all known packages with name pkg and their exports.
|
||||
func GetPackageExports(pkg, filename string, opt *Options) (exports []PackageExport, err error) {
|
||||
_, opt, err = initialize(filename, nil, opt)
|
||||
func GetPackageExports(ctx context.Context, callback func(PackageExport), searchPkg, filename, filePkg string, opt *Options) error {
|
||||
_, opt, err := initialize(filename, []byte{}, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
return getPackageExports(pkg, filename, opt.Env)
|
||||
return getPackageExports(ctx, callback, searchPkg, filename, filePkg, opt.Env)
|
||||
}
|
||||
|
||||
// initialize sets the values for opt and src.
|
||||
@@ -133,16 +147,18 @@ func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, er
|
||||
// Set the env if the user has not provided it.
|
||||
if opt.Env == nil {
|
||||
opt.Env = &ProcessEnv{
|
||||
GOPATH: build.Default.GOPATH,
|
||||
GOROOT: build.Default.GOROOT,
|
||||
GOPATH: build.Default.GOPATH,
|
||||
GOROOT: build.Default.GOROOT,
|
||||
GOFLAGS: os.Getenv("GOFLAGS"),
|
||||
GO111MODULE: os.Getenv("GO111MODULE"),
|
||||
GOPROXY: os.Getenv("GOPROXY"),
|
||||
GOSUMDB: os.Getenv("GOSUMDB"),
|
||||
}
|
||||
}
|
||||
|
||||
// Set the logger if the user has not provided it.
|
||||
if opt.Env.Logf == nil {
|
||||
opt.Env.Logf = log.Printf
|
||||
// Set the gocmdRunner if the user has not provided it.
|
||||
if opt.Env.GocmdRunner == nil {
|
||||
opt.Env.GocmdRunner = &gocommand.Runner{}
|
||||
}
|
||||
|
||||
if src == nil {
|
||||
b, err := ioutil.ReadFile(filename)
|
||||
if err != nil {
|
||||
|
370
vendor/golang.org/x/tools/internal/imports/mod.go
generated
vendored
@@ -13,11 +13,10 @@ import (
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
"golang.org/x/tools/internal/module"
|
||||
"golang.org/x/tools/internal/semver"
|
||||
)
|
||||
|
||||
// ModuleResolver implements resolver for modules using the go command as little
|
||||
@@ -25,32 +24,42 @@ import (
|
||||
type ModuleResolver struct {
|
||||
env *ProcessEnv
|
||||
moduleCacheDir string
|
||||
dummyVendorMod *ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
|
||||
dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
|
||||
roots []gopathwalk.Root
|
||||
scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
|
||||
scannedRoots map[gopathwalk.Root]bool
|
||||
|
||||
Initialized bool
|
||||
Main *ModuleJSON
|
||||
ModsByModPath []*ModuleJSON // All modules, ordered by # of path components in module Path...
|
||||
ModsByDir []*ModuleJSON // ...or Dir.
|
||||
initialized bool
|
||||
main *gocommand.ModuleJSON
|
||||
modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path...
|
||||
modsByDir []*gocommand.ModuleJSON // ...or Dir.
|
||||
|
||||
// moduleCacheCache stores information about the module cache.
|
||||
moduleCacheCache *dirInfoCache
|
||||
otherCache *dirInfoCache
|
||||
}
|
||||
|
||||
type ModuleJSON struct {
|
||||
Path string // module path
|
||||
Replace *ModuleJSON // replaced by this module
|
||||
Main bool // is this the main module?
|
||||
Dir string // directory holding files for this module, if any
|
||||
GoMod string // path to go.mod file for this module, if any
|
||||
GoVersion string // go version used in module
|
||||
func newModuleResolver(e *ProcessEnv) *ModuleResolver {
|
||||
r := &ModuleResolver{
|
||||
env: e,
|
||||
scanSema: make(chan struct{}, 1),
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
return r
|
||||
}
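
Note: newModuleResolver seeds scanSema with a single token, i.e. a one-slot buffered channel used as a lock that can also be waited on inside a select (so acquisition can be abandoned on context cancellation). A minimal sketch of that pattern, detached from the resolver:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	sema := make(chan struct{}, 1)
	sema <- struct{}{} // one token: exactly one scan may run at a time

	scan := func(ctx context.Context, name string) {
		select {
		case <-ctx.Done():
			fmt.Println(name, "cancelled before acquiring")
			return
		case <-sema: // acquire
		}
		defer func() { sema <- struct{}{} }() // release
		fmt.Println(name, "scanning")
		time.Sleep(10 * time.Millisecond)
	}

	ctx := context.Background()
	go scan(ctx, "scan-1")
	scan(ctx, "scan-2")
	time.Sleep(50 * time.Millisecond) // let the background scan finish
}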
|
||||
|
||||
func (r *ModuleResolver) init() error {
|
||||
if r.Initialized {
|
||||
if r.initialized {
|
||||
return nil
|
||||
}
|
||||
mainMod, vendorEnabled, err := vendorEnabled(r.env)
|
||||
|
||||
inv := gocommand.Invocation{
|
||||
BuildFlags: r.env.BuildFlags,
|
||||
Env: r.env.env(),
|
||||
Logf: r.env.Logf,
|
||||
WorkingDir: r.env.WorkingDir,
|
||||
}
|
||||
mainMod, vendorEnabled, err := gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -58,13 +67,13 @@ func (r *ModuleResolver) init() error {
|
||||
if mainMod != nil && vendorEnabled {
|
||||
// Vendor mode is on, so all the non-Main modules are irrelevant,
|
||||
// and we need to search /vendor for everything.
|
||||
r.Main = mainMod
|
||||
r.dummyVendorMod = &ModuleJSON{
|
||||
r.main = mainMod
|
||||
r.dummyVendorMod = &gocommand.ModuleJSON{
|
||||
Path: "",
|
||||
Dir: filepath.Join(mainMod.Dir, "vendor"),
|
||||
}
|
||||
r.ModsByModPath = []*ModuleJSON{mainMod, r.dummyVendorMod}
|
||||
r.ModsByDir = []*ModuleJSON{mainMod, r.dummyVendorMod}
|
||||
r.modsByModPath = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod}
|
||||
r.modsByDir = []*gocommand.ModuleJSON{mainMod, r.dummyVendorMod}
|
||||
} else {
|
||||
// Vendor mode is off, so run go list -m ... to find everything.
|
||||
r.initAllMods()
|
||||
@@ -72,79 +81,123 @@ func (r *ModuleResolver) init() error {
|
||||
|
||||
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod")
|
||||
|
||||
sort.Slice(r.ModsByModPath, func(i, j int) bool {
|
||||
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.ModsByModPath[x].Path, "/")
|
||||
return strings.Count(r.modsByModPath[x].Path, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
sort.Slice(r.ModsByDir, func(i, j int) bool {
|
||||
sort.Slice(r.modsByDir, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.ModsByDir[x].Dir, "/")
|
||||
return strings.Count(r.modsByDir[x].Dir, "/")
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
|
||||
r.roots = []gopathwalk.Root{
|
||||
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
|
||||
}
|
||||
if r.main != nil {
|
||||
r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
|
||||
}
|
||||
if vendorEnabled {
|
||||
r.roots = append(r.roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
|
||||
} else {
|
||||
addDep := func(mod *gocommand.ModuleJSON) {
|
||||
if mod.Replace == nil {
|
||||
// This is redundant with the cache, but we'll skip it cheaply enough.
|
||||
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootModuleCache})
|
||||
} else {
|
||||
r.roots = append(r.roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
|
||||
}
|
||||
}
|
||||
// Walk dependent modules before scanning the full mod cache, direct deps first.
|
||||
for _, mod := range r.modsByModPath {
|
||||
if !mod.Indirect && !mod.Main {
|
||||
addDep(mod)
|
||||
}
|
||||
}
|
||||
for _, mod := range r.modsByModPath {
|
||||
if mod.Indirect && !mod.Main {
|
||||
addDep(mod)
|
||||
}
|
||||
}
|
||||
r.roots = append(r.roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
|
||||
}
|
||||
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
if r.moduleCacheCache == nil {
|
||||
r.moduleCacheCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
}
|
||||
if r.otherCache == nil {
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
}
|
||||
r.Initialized = true
|
||||
r.initialized = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) initAllMods() error {
|
||||
stdout, err := r.env.invokeGo("list", "-m", "-json", "...")
|
||||
stdout, err := r.env.invokeGo(context.TODO(), "list", "-m", "-json", "...")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for dec := json.NewDecoder(stdout); dec.More(); {
|
||||
mod := &ModuleJSON{}
|
||||
mod := &gocommand.ModuleJSON{}
|
||||
if err := dec.Decode(mod); err != nil {
|
||||
return err
|
||||
}
|
||||
if mod.Dir == "" {
|
||||
if r.env.Debug {
|
||||
if r.env.Logf != nil {
|
||||
r.env.Logf("module %v has not been downloaded and will be ignored", mod.Path)
|
||||
}
|
||||
// Can't do anything with a module that's not downloaded.
|
||||
continue
|
||||
}
|
||||
r.ModsByModPath = append(r.ModsByModPath, mod)
|
||||
r.ModsByDir = append(r.ModsByDir, mod)
|
||||
// golang/go#36193: the go command doesn't always clean paths.
|
||||
mod.Dir = filepath.Clean(mod.Dir)
|
||||
r.modsByModPath = append(r.modsByModPath, mod)
|
||||
r.modsByDir = append(r.modsByDir, mod)
|
||||
if mod.Main {
|
||||
r.Main = mod
|
||||
r.main = mod
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
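
Note: initAllMods decodes the output of `go list -m -json ...`, which is a stream of concatenated JSON objects rather than a JSON array, so it loops with json.Decoder.More(). The sketch below decodes the same shape from a fixed string; the struct mirrors only the fields the loop in the diff actually uses.

package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// mod mirrors the handful of ModuleJSON fields used above.
type mod struct {
	Path     string
	Dir      string
	Main     bool
	Indirect bool
}

func main() {
	// `go list -m -json` prints one object after another, not an array.
	out := `{"Path":"example.com/app","Dir":"/src/app","Main":true}
{"Path":"golang.org/x/mod","Dir":"/go/pkg/mod/golang.org/x/mod@v0.2.0","Indirect":true}
{"Path":"example.com/notdownloaded"}`

	for dec := json.NewDecoder(strings.NewReader(out)); dec.More(); {
		var m mod
		if err := dec.Decode(&m); err != nil {
			fmt.Println("decode error:", err)
			return
		}
		if m.Dir == "" {
			// Mirrors the diff: a module that has not been downloaded is skipped.
			continue
		}
		fmt.Printf("%s -> %s (main=%v indirect=%v)\n", m.Path, m.Dir, m.Main, m.Indirect)
	}
}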
|
||||
|
||||
func (r *ModuleResolver) ClearForNewScan() {
|
||||
<-r.scanSema
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewMod() {
|
||||
env := r.env
|
||||
<-r.scanSema
|
||||
*r = ModuleResolver{
|
||||
env: env,
|
||||
env: r.env,
|
||||
moduleCacheCache: r.moduleCacheCache,
|
||||
otherCache: r.otherCache,
|
||||
scanSema: r.scanSema,
|
||||
}
|
||||
r.init()
|
||||
r.scanSema <- struct{}{}
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory that contains the package at
|
||||
// the given import path, or returns nil, "" if no module is in scope.
|
||||
func (r *ModuleResolver) findPackage(importPath string) (*ModuleJSON, string) {
|
||||
func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) {
|
||||
// This can't find packages in the stdlib, but that's harmless for all
|
||||
// the existing code paths.
|
||||
for _, m := range r.ModsByModPath {
|
||||
for _, m := range r.modsByModPath {
|
||||
if !strings.HasPrefix(importPath, m.Path) {
|
||||
continue
|
||||
}
|
||||
@@ -211,7 +264,7 @@ func (r *ModuleResolver) cacheKeys() []string {
|
||||
}
|
||||
|
||||
// cachePackageName caches the package name for a dir already in the cache.
|
||||
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
|
||||
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
return r.moduleCacheCache.CachePackageName(info)
|
||||
}
|
||||
@@ -227,7 +280,7 @@ func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info
|
||||
|
||||
// findModuleByDir returns the module that contains dir, or nil if no such
|
||||
// module is in scope.
|
||||
func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
|
||||
func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON {
|
||||
// This is quite tricky and may not be correct. dir could be:
|
||||
// - a package in the main module.
|
||||
// - a replace target underneath the main module's directory.
|
||||
@@ -238,7 +291,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
|
||||
// - in /vendor/ in -mod=vendor mode.
|
||||
// - nested module? Dunno.
|
||||
// Rumor has it that replace targets cannot contain other replace targets.
|
||||
for _, m := range r.ModsByDir {
|
||||
for _, m := range r.modsByDir {
|
||||
if !strings.HasPrefix(dir, m.Dir) {
|
||||
continue
|
||||
}
|
||||
@@ -254,7 +307,7 @@ func (r *ModuleResolver) findModuleByDir(dir string) *ModuleJSON {
|
||||
|
||||
// dirIsNestedModule reports if dir is contained in a nested module underneath
|
||||
// mod, not actually in mod.
|
||||
func (r *ModuleResolver) dirIsNestedModule(dir string, mod *ModuleJSON) bool {
|
||||
func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON) bool {
|
||||
if !strings.HasPrefix(dir, mod.Dir) {
|
||||
return false
|
||||
}
|
||||
@@ -333,41 +386,49 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (
|
||||
return names, nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk.RootType) ([]*pkg, error) {
|
||||
func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// Walk GOROOT, GOPATH/pkg/mod, and the main module.
|
||||
roots := []gopathwalk.Root{
|
||||
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
|
||||
}
|
||||
if r.Main != nil {
|
||||
roots = append(roots, gopathwalk.Root{r.Main.Dir, gopathwalk.RootCurrentModule})
|
||||
}
|
||||
if r.dummyVendorMod != nil {
|
||||
roots = append(roots, gopathwalk.Root{r.dummyVendorMod.Dir, gopathwalk.RootOther})
|
||||
} else {
|
||||
roots = append(roots, gopathwalk.Root{r.moduleCacheDir, gopathwalk.RootModuleCache})
|
||||
// Walk replace targets, just in case they're not in any of the above.
|
||||
for _, mod := range r.ModsByModPath {
|
||||
if mod.Replace != nil {
|
||||
roots = append(roots, gopathwalk.Root{mod.Dir, gopathwalk.RootOther})
|
||||
}
|
||||
processDir := func(info directoryPackageInfo) {
|
||||
// Skip this directory if we were not able to get the package information successfully.
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
return
|
||||
}
|
||||
pkg, err := r.canonicalize(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.dirFound(pkg) {
|
||||
return
|
||||
}
|
||||
pkg.packageName, err = r.cachePackageName(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.packageNameLoaded(pkg) {
|
||||
return
|
||||
}
|
||||
_, exports, err := r.loadExports(ctx, pkg, false)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
callback.exportsLoaded(pkg, exports)
|
||||
}
|
||||
|
||||
roots = filterRoots(roots, exclude)
|
||||
// Start processing everything in the cache, and listen for the new stuff
|
||||
// we discover in the walk below.
|
||||
stop1 := r.moduleCacheCache.ScanAndListen(ctx, processDir)
|
||||
defer stop1()
|
||||
stop2 := r.otherCache.ScanAndListen(ctx, processDir)
|
||||
defer stop2()
|
||||
|
||||
var result []*pkg
|
||||
var mu sync.Mutex
|
||||
|
||||
// We assume cached directories have not changed. We can skip them and their
|
||||
// children.
|
||||
// We assume cached directories are fully cached, including all their
|
||||
// children, and have not changed. We can skip them.
|
||||
skip := func(root gopathwalk.Root, dir string) bool {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
info, ok := r.cacheLoad(dir)
|
||||
if !ok {
|
||||
return false
|
||||
@@ -379,44 +440,64 @@ func (r *ModuleResolver) scan(_ references, loadNames bool, exclude []gopathwalk
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
// Add anything new to the cache. We'll process everything in it below.
|
||||
// Add anything new to the cache, and process it if we're still listening.
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
|
||||
r.cacheStore(r.scanDirForPackage(root, dir))
|
||||
}
|
||||
|
||||
gopathwalk.WalkSkip(roots, add, skip, gopathwalk.Options{Debug: r.env.Debug, ModulesEnabled: true})
|
||||
|
||||
// Everything we already had, and everything new, is now in the cache.
|
||||
for _, dir := range r.cacheKeys() {
|
||||
info, ok := r.cacheLoad(dir)
|
||||
if !ok {
|
||||
continue
|
||||
// r.roots and the callback are not necessarily safe to use in the
|
||||
// goroutine below. Process them eagerly.
|
||||
roots := filterRoots(r.roots, callback.rootFound)
|
||||
// We can't cancel walks, because we need them to finish to have a usable
|
||||
// cache. Instead, run them in a separate goroutine and detach.
|
||||
scanDone := make(chan struct{})
|
||||
go func() {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-r.scanSema:
|
||||
}
|
||||
defer func() { r.scanSema <- struct{}{} }()
|
||||
// We have the lock on r.scannedRoots, and no other scans can run.
|
||||
for _, root := range roots {
|
||||
if ctx.Err() != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Skip this directory if we were not able to get the package information successfully.
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// If we want package names, make sure the cache has them.
|
||||
if loadNames {
|
||||
var err error
|
||||
if info, err = r.cachePackageName(info); err != nil {
|
||||
if r.scannedRoots[root] {
|
||||
continue
|
||||
}
|
||||
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: r.env.Logf, ModulesEnabled: true})
|
||||
r.scannedRoots[root] = true
|
||||
}
|
||||
|
||||
res, err := r.canonicalize(info)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
result = append(result, res)
|
||||
close(scanDone)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case <-scanDone:
|
||||
}
|
||||
return nil
|
||||
}
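
Note: the new scan() cannot cancel a walk midway, because the cache must end up complete to stay usable; instead it detaches the walk into a goroutine and makes only the wait cancellable. A stripped-down sketch of that "detach, but wait with ctx" shape, with a fake walk standing in for gopathwalk:

package main

import (
	"context"
	"fmt"
	"time"
)

// walkAll stands in for the gopathwalk loop; it must run to completion so the
// cache stays usable, even if the caller stops waiting.
func walkAll(roots []string) {
	for _, r := range roots {
		time.Sleep(20 * time.Millisecond) // pretend to walk r
		fmt.Println("walked", r)
	}
}

func scan(ctx context.Context, roots []string) error {
	scanDone := make(chan struct{})
	go func() {
		defer close(scanDone)
		walkAll(roots)
	}()
	// Give up waiting on cancellation; the walk itself keeps running in the
	// background, and, as in the diff, no error is surfaced for that case.
	select {
	case <-ctx.Done():
	case <-scanDone:
	}
	return nil
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Millisecond)
	defer cancel()
	fmt.Println("scan returned:", scan(ctx, []string{"GOROOT/src", "pkg/mod"}))
	time.Sleep(100 * time.Millisecond) // let the detached walk finish printing
}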
|
||||
|
||||
return result, nil
|
||||
func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) int {
|
||||
if _, ok := stdlib[path]; ok {
|
||||
return MaxRelevance
|
||||
}
|
||||
mod, _ := r.findPackage(path)
|
||||
return modRelevance(mod)
|
||||
}
|
||||
|
||||
func modRelevance(mod *gocommand.ModuleJSON) int {
|
||||
switch {
|
||||
case mod == nil: // out of scope
|
||||
return MaxRelevance - 4
|
||||
case mod.Indirect:
|
||||
return MaxRelevance - 3
|
||||
case !mod.Main:
|
||||
return MaxRelevance - 2
|
||||
default:
|
||||
return MaxRelevance - 1 // main module ties with stdlib
|
||||
}
|
||||
}
|
||||
|
||||
// canonicalize gets the result of canonicalizing the packages using the results
|
||||
@@ -428,15 +509,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
importPathShort: info.nonCanonicalImportPath,
|
||||
dir: info.dir,
|
||||
packageName: path.Base(info.nonCanonicalImportPath),
|
||||
relevance: 0,
|
||||
relevance: MaxRelevance,
|
||||
}, nil
|
||||
}
|
||||
|
||||
importPath := info.nonCanonicalImportPath
|
||||
relevance := 2
|
||||
mod := r.findModuleByDir(info.dir)
|
||||
// Check if the directory is underneath a module that's in scope.
|
||||
if mod := r.findModuleByDir(info.dir); mod != nil {
|
||||
relevance = 1
|
||||
if mod != nil {
|
||||
// It is. If dir is the target of a replace directive,
|
||||
// our guessed import path is wrong. Use the real one.
|
||||
if mod.Dir == info.dir {
|
||||
@@ -445,15 +525,16 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
dirInMod := info.dir[len(mod.Dir)+len("/"):]
|
||||
importPath = path.Join(mod.Path, filepath.ToSlash(dirInMod))
|
||||
}
|
||||
} else if info.needsReplace {
|
||||
} else if !strings.HasPrefix(importPath, info.moduleName) {
|
||||
// The module's name doesn't match the package's import path. It
|
||||
// probably needs a replace directive we don't have.
|
||||
return nil, fmt.Errorf("package in %q is not valid without a replace statement", info.dir)
|
||||
}
|
||||
|
||||
res := &pkg{
|
||||
importPathShort: importPath,
|
||||
dir: info.dir,
|
||||
packageName: info.packageName, // may not be populated if the caller didn't ask for it
|
||||
relevance: relevance,
|
||||
relevance: modRelevance(mod),
|
||||
}
|
||||
// We may have discovered a package that has a different version
|
||||
// in scope already. Canonicalize to that one if possible.
|
||||
@@ -463,14 +544,14 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg) (string, []string, error) {
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if info, ok := r.cacheLoad(pkg.dir); ok {
|
||||
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
|
||||
return r.cacheExports(ctx, r.env, info)
|
||||
}
|
||||
return loadExportsFromFiles(ctx, r.env, pkg.dir)
|
||||
return loadExportsFromFiles(ctx, r.env, pkg.dir, includeTest)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) directoryPackageInfo {
|
||||
@@ -488,7 +569,7 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
|
||||
}
|
||||
switch root.Type {
|
||||
case gopathwalk.RootCurrentModule:
|
||||
importPath = path.Join(r.Main.Path, filepath.ToSlash(subdir))
|
||||
importPath = path.Join(r.main.Path, filepath.ToSlash(subdir))
|
||||
case gopathwalk.RootModuleCache:
|
||||
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||
if len(matches) == 0 {
|
||||
@@ -497,9 +578,9 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
|
||||
err: fmt.Errorf("invalid module cache path: %v", subdir),
|
||||
}
|
||||
}
|
||||
modPath, err := module.DecodePath(filepath.ToSlash(matches[1]))
|
||||
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
|
||||
if err != nil {
|
||||
if r.env.Debug {
|
||||
if r.env.Logf != nil {
|
||||
r.env.Logf("decoding module cache path %q: %v", subdir, err)
|
||||
}
|
||||
return directoryPackageInfo{
|
||||
@@ -516,7 +597,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
|
||||
dir: dir,
|
||||
rootType: root.Type,
|
||||
nonCanonicalImportPath: importPath,
|
||||
needsReplace: false,
|
||||
moduleDir: modDir,
|
||||
moduleName: modName,
|
||||
}
|
||||
@@ -524,14 +604,6 @@ func (r *ModuleResolver) scanDirForPackage(root gopathwalk.Root, dir string) dir
|
||||
// stdlib packages are always in scope, despite the confusing go.mod
|
||||
return result
|
||||
}
|
||||
// Check that this package is not obviously impossible to import.
|
||||
if !strings.HasPrefix(importPath, modName) {
|
||||
// The module's declared path does not match
|
||||
// its expected path. It probably needs a
|
||||
// replace directive we don't have.
|
||||
result.needsReplace = true
|
||||
}
|
||||
|
||||
return result
|
||||
}
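
Note: scanDirForPackage now calls module.UnescapePath from golang.org/x/mod/module (which replaced the internal module package) to turn an escaped module-cache directory name back into a module path. A small example, assuming golang.org/x/mod is available in the build and using a hypothetical cache entry name; the regexp here is a local copy written for the demo.

package main

import (
	"fmt"
	"regexp"

	"golang.org/x/mod/module"
)

// Matches "<escaped module path>@<version><subdir>" as laid out in GOPATH/pkg/mod.
var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`)

func main() {
	// Hypothetical module-cache subdirectory; upper-case letters are escaped with '!'.
	subdir := "github.com/!burnt!sushi/toml@v0.3.1/cmd"

	m := modCacheRegexp.FindStringSubmatch(subdir)
	if len(m) == 0 {
		fmt.Println("not a module cache path")
		return
	}
	modPath, err := module.UnescapePath(m[1])
	if err != nil {
		fmt.Println("unescape failed:", err)
		return
	}
	fmt.Println(modPath, m[2], m[3]) // github.com/BurntSushi/toml v0.3.1 /cmd
}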
|
||||
|
||||
@@ -581,63 +653,3 @@ func modulePath(mod []byte) string {
|
||||
}
|
||||
return "" // missing module path
|
||||
}
|
||||
|
||||
var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)
|
||||
|
||||
// vendorEnabled indicates if vendoring is enabled.
|
||||
// Inspired by setDefaultBuildMod in modload/init.go
|
||||
func vendorEnabled(env *ProcessEnv) (*ModuleJSON, bool, error) {
|
||||
mainMod, go114, err := getMainModuleAnd114(env)
|
||||
if err != nil {
|
||||
return nil, false, err
|
||||
}
|
||||
matches := modFlagRegexp.FindStringSubmatch(env.GOFLAGS)
|
||||
var modFlag string
|
||||
if len(matches) != 0 {
|
||||
modFlag = matches[1]
|
||||
}
|
||||
if modFlag != "" {
|
||||
// Don't override an explicit '-mod=' argument.
|
||||
return mainMod, modFlag == "vendor", nil
|
||||
}
|
||||
if mainMod == nil || !go114 {
|
||||
return mainMod, false, nil
|
||||
}
|
||||
// Check 1.14's automatic vendor mode.
|
||||
if fi, err := os.Stat(filepath.Join(mainMod.Dir, "vendor")); err == nil && fi.IsDir() {
|
||||
if mainMod.GoVersion != "" && semver.Compare("v"+mainMod.GoVersion, "v1.14") >= 0 {
|
||||
// The Go version is at least 1.14, and a vendor directory exists.
|
||||
// Set -mod=vendor by default.
|
||||
return mainMod, true, nil
|
||||
}
|
||||
}
|
||||
return mainMod, false, nil
|
||||
}
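
Note: the vendorEnabled helper deleted here (its replacement lives in gocommand.VendorEnabled) decides vendor mode from an explicit -mod flag in GOFLAGS before falling back to the Go 1.14 heuristic. The flag extraction is a one-line regexp; a stand-alone sketch of just that part:

package main

import (
	"fmt"
	"regexp"
)

var modFlagRegexp = regexp.MustCompile(`-mod[ =](\w+)`)

func main() {
	for _, goflags := range []string{"", "-mod=vendor -trimpath", "-mod mod"} {
		var modFlag string
		if m := modFlagRegexp.FindStringSubmatch(goflags); len(m) != 0 {
			modFlag = m[1]
		}
		fmt.Printf("GOFLAGS=%q -> modFlag=%q vendor=%v\n", goflags, modFlag, modFlag == "vendor")
	}
}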
|
||||
|
||||
// getMainModuleAnd114 gets the main module's information and whether the
|
||||
// go command in use is 1.14+. This is the information needed to figure out
|
||||
// if vendoring should be enabled.
|
||||
func getMainModuleAnd114(env *ProcessEnv) (*ModuleJSON, bool, error) {
|
||||
const format = `{{.Path}}
|
||||
{{.Dir}}
|
||||
{{.GoMod}}
|
||||
{{.GoVersion}}
|
||||
{{range context.ReleaseTags}}{{if eq . "go1.14"}}{{.}}{{end}}{{end}}
|
||||
`
|
||||
stdout, err := env.invokeGo("list", "-m", "-f", format)
|
||||
if err != nil {
|
||||
return nil, false, nil
|
||||
}
|
||||
lines := strings.Split(stdout.String(), "\n")
|
||||
if len(lines) < 5 {
|
||||
return nil, false, fmt.Errorf("unexpected stdout: %q", stdout)
|
||||
}
|
||||
mod := &ModuleJSON{
|
||||
Path: lines[0],
|
||||
Dir: lines[1],
|
||||
GoMod: lines[2],
|
||||
GoVersion: lines[3],
|
||||
Main: true,
|
||||
}
|
||||
return mod, lines[4] == "go1.14", nil
|
||||
}
|
||||
|
95
vendor/golang.org/x/tools/internal/imports/mod_cache.go
generated
vendored
@@ -49,10 +49,6 @@ type directoryPackageInfo struct {
|
||||
// nonCanonicalImportPath is the package's expected import path. It may
|
||||
// not actually be importable at that path.
|
||||
nonCanonicalImportPath string
|
||||
// needsReplace is true if the nonCanonicalImportPath does not match the
|
||||
// module's declared path, making it impossible to import without a
|
||||
// replace directive.
|
||||
needsReplace bool
|
||||
|
||||
// Module-related information.
|
||||
moduleDir string // The directory that is the module root of this dir.
|
||||
@@ -97,15 +93,86 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
|
||||
type dirInfoCache struct {
|
||||
mu sync.Mutex
|
||||
// dirs stores information about packages in directories, keyed by absolute path.
|
||||
dirs map[string]*directoryPackageInfo
|
||||
dirs map[string]*directoryPackageInfo
|
||||
listeners map[*int]cacheListener
|
||||
}
|
||||
|
||||
type cacheListener func(directoryPackageInfo)
|
||||
|
||||
// ScanAndListen calls listener on all the items in the cache, and on anything
|
||||
// newly added. The returned stop function waits for all in-flight callbacks to
|
||||
// finish and blocks new ones.
|
||||
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Flushing out all the callbacks is tricky without knowing how many there
|
||||
// are going to be. Setting an arbitrary limit makes it much easier.
|
||||
const maxInFlight = 10
|
||||
sema := make(chan struct{}, maxInFlight)
|
||||
for i := 0; i < maxInFlight; i++ {
|
||||
sema <- struct{}{}
|
||||
}
|
||||
|
||||
cookie := new(int) // A unique ID we can use for the listener.
|
||||
|
||||
// We can't hold mu while calling the listener.
|
||||
d.mu.Lock()
|
||||
var keys []string
|
||||
for key := range d.dirs {
|
||||
keys = append(keys, key)
|
||||
}
|
||||
d.listeners[cookie] = func(info directoryPackageInfo) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-sema:
|
||||
}
|
||||
listener(info)
|
||||
sema <- struct{}{}
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
stop := func() {
|
||||
cancel()
|
||||
d.mu.Lock()
|
||||
delete(d.listeners, cookie)
|
||||
d.mu.Unlock()
|
||||
for i := 0; i < maxInFlight; i++ {
|
||||
<-sema
|
||||
}
|
||||
}
|
||||
|
||||
// Process the pre-existing keys.
|
||||
for _, k := range keys {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return stop
|
||||
default:
|
||||
}
|
||||
if v, ok := d.Load(k); ok {
|
||||
listener(v)
|
||||
}
|
||||
}
|
||||
|
||||
return stop
|
||||
}
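
Note: ScanAndListen bounds in-flight listener callbacks with a small counting semaphore so that the returned stop function can drain them deterministically (cancel, then reclaim every slot). The shape of that drain, isolated from the cache as a rough sketch:

package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	const maxInFlight = 3
	sema := make(chan struct{}, maxInFlight)
	for i := 0; i < maxInFlight; i++ {
		sema <- struct{}{}
	}

	ctx, cancel := context.WithCancel(context.Background())

	listener := func(v int) {
		select {
		case <-ctx.Done():
			return
		case <-sema: // take a slot
		}
		defer func() { sema <- struct{}{} }() // give it back
		time.Sleep(5 * time.Millisecond)
		fmt.Println("handled", v)
	}

	for i := 0; i < 10; i++ {
		go listener(i)
	}

	// stop: cancel new work, then reclaim every slot, which waits for all
	// in-flight callbacks to finish before returning.
	stop := func() {
		cancel()
		for i := 0; i < maxInFlight; i++ {
			<-sema
		}
	}
	time.Sleep(8 * time.Millisecond)
	stop()
	fmt.Println("all listeners drained")
}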
|
||||
|
||||
// Store stores the package info for dir.
|
||||
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
stored := info // defensive copy
|
||||
d.dirs[dir] = &stored
|
||||
_, old := d.dirs[dir]
|
||||
d.dirs[dir] = &info
|
||||
var listeners []cacheListener
|
||||
for _, l := range d.listeners {
|
||||
listeners = append(listeners, l)
|
||||
}
|
||||
d.mu.Unlock()
|
||||
|
||||
if !old {
|
||||
for _, l := range listeners {
|
||||
l(info)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
|
||||
@@ -129,17 +196,17 @@ func (d *dirInfoCache) Keys() (keys []string) {
|
||||
return keys
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (directoryPackageInfo, error) {
|
||||
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
return info, err
|
||||
return info.packageName, err
|
||||
}
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
return info, fmt.Errorf("cannot read package name, scan error: %v", err)
|
||||
return "", fmt.Errorf("cannot read package name, scan error: %v", err)
|
||||
}
|
||||
info.packageName, info.err = packageDirToName(info.dir)
|
||||
info.status = nameLoaded
|
||||
d.Store(info.dir, info)
|
||||
return info, info.err
|
||||
return info.packageName, info.err
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
@@ -149,8 +216,8 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d
|
||||
if reached, err := info.reachedStatus(nameLoaded); reached && err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir)
|
||||
if info.err == context.Canceled {
|
||||
info.packageName, info.exports, info.err = loadExportsFromFiles(ctx, env, info.dir, false)
|
||||
if info.err == context.Canceled || info.err == context.DeadlineExceeded {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
// The cache structure wants things to proceed linearly. We can skip a
|
||||
|
87
vendor/golang.org/x/tools/internal/imports/zstdlib.go
generated
vendored
@@ -415,6 +415,9 @@ var stdlib = map[string][]string{
|
||||
"crypto/tls": []string{
|
||||
"Certificate",
|
||||
"CertificateRequestInfo",
|
||||
"CipherSuite",
|
||||
"CipherSuiteName",
|
||||
"CipherSuites",
|
||||
"Client",
|
||||
"ClientAuthType",
|
||||
"ClientHelloInfo",
|
||||
@@ -434,6 +437,7 @@ var stdlib = map[string][]string{
|
||||
"ECDSAWithP521AndSHA512",
|
||||
"ECDSAWithSHA1",
|
||||
"Ed25519",
|
||||
"InsecureCipherSuites",
|
||||
"Listen",
|
||||
"LoadX509KeyPair",
|
||||
"NewLRUClientSessionCache",
|
||||
@@ -465,6 +469,7 @@ var stdlib = map[string][]string{
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
|
||||
"TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
|
||||
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305",
|
||||
"TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
|
||||
"TLS_ECDHE_ECDSA_WITH_RC4_128_SHA",
|
||||
"TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
"TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
|
||||
@@ -473,6 +478,7 @@ var stdlib = map[string][]string{
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA",
|
||||
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384",
|
||||
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305",
|
||||
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256",
|
||||
"TLS_ECDHE_RSA_WITH_RC4_128_SHA",
|
||||
"TLS_FALLBACK_SCSV",
|
||||
"TLS_RSA_WITH_3DES_EDE_CBC_SHA",
|
||||
@@ -698,36 +704,65 @@ var stdlib = map[string][]string{
|
||||
"Attr",
|
||||
"AttrAbstractOrigin",
|
||||
"AttrAccessibility",
|
||||
"AttrAddrBase",
|
||||
"AttrAddrClass",
|
||||
"AttrAlignment",
|
||||
"AttrAllocated",
|
||||
"AttrArtificial",
|
||||
"AttrAssociated",
|
||||
"AttrBaseTypes",
|
||||
"AttrBinaryScale",
|
||||
"AttrBitOffset",
|
||||
"AttrBitSize",
|
||||
"AttrByteSize",
|
||||
"AttrCallAllCalls",
|
||||
"AttrCallAllSourceCalls",
|
||||
"AttrCallAllTailCalls",
|
||||
"AttrCallColumn",
|
||||
"AttrCallDataLocation",
|
||||
"AttrCallDataValue",
|
||||
"AttrCallFile",
|
||||
"AttrCallLine",
|
||||
"AttrCallOrigin",
|
||||
"AttrCallPC",
|
||||
"AttrCallParameter",
|
||||
"AttrCallReturnPC",
|
||||
"AttrCallTailCall",
|
||||
"AttrCallTarget",
|
||||
"AttrCallTargetClobbered",
|
||||
"AttrCallValue",
|
||||
"AttrCalling",
|
||||
"AttrCommonRef",
|
||||
"AttrCompDir",
|
||||
"AttrConstExpr",
|
||||
"AttrConstValue",
|
||||
"AttrContainingType",
|
||||
"AttrCount",
|
||||
"AttrDataBitOffset",
|
||||
"AttrDataLocation",
|
||||
"AttrDataMemberLoc",
|
||||
"AttrDecimalScale",
|
||||
"AttrDecimalSign",
|
||||
"AttrDeclColumn",
|
||||
"AttrDeclFile",
|
||||
"AttrDeclLine",
|
||||
"AttrDeclaration",
|
||||
"AttrDefaultValue",
|
||||
"AttrDefaulted",
|
||||
"AttrDeleted",
|
||||
"AttrDescription",
|
||||
"AttrDigitCount",
|
||||
"AttrDiscr",
|
||||
"AttrDiscrList",
|
||||
"AttrDiscrValue",
|
||||
"AttrDwoName",
|
||||
"AttrElemental",
|
||||
"AttrEncoding",
|
||||
"AttrEndianity",
|
||||
"AttrEntrypc",
|
||||
"AttrEnumClass",
|
||||
"AttrExplicit",
|
||||
"AttrExportSymbols",
|
||||
"AttrExtension",
|
||||
"AttrExternal",
|
||||
"AttrFrameBase",
|
||||
@@ -738,27 +773,47 @@ var stdlib = map[string][]string{
|
||||
"AttrInline",
|
||||
"AttrIsOptional",
|
||||
"AttrLanguage",
|
||||
"AttrLinkageName",
|
||||
"AttrLocation",
|
||||
"AttrLoclistsBase",
|
||||
"AttrLowerBound",
|
||||
"AttrLowpc",
|
||||
"AttrMacroInfo",
|
||||
"AttrMacros",
|
||||
"AttrMainSubprogram",
|
||||
"AttrMutable",
|
||||
"AttrName",
|
||||
"AttrNamelistItem",
|
||||
"AttrNoreturn",
|
||||
"AttrObjectPointer",
|
||||
"AttrOrdering",
|
||||
"AttrPictureString",
|
||||
"AttrPriority",
|
||||
"AttrProducer",
|
||||
"AttrPrototyped",
|
||||
"AttrPure",
|
||||
"AttrRanges",
|
||||
"AttrRank",
|
||||
"AttrRecursive",
|
||||
"AttrReference",
|
||||
"AttrReturnAddr",
|
||||
"AttrRnglistsBase",
|
||||
"AttrRvalueReference",
|
||||
"AttrSegment",
|
||||
"AttrSibling",
|
||||
"AttrSignature",
|
||||
"AttrSmall",
|
||||
"AttrSpecification",
|
||||
"AttrStartScope",
|
||||
"AttrStaticLink",
|
||||
"AttrStmtList",
|
||||
"AttrStrOffsetsBase",
|
||||
"AttrStride",
|
||||
"AttrStrideSize",
|
||||
"AttrStringLength",
|
||||
"AttrStringLengthBitSize",
|
||||
"AttrStringLengthByteSize",
|
||||
"AttrThreadsScaled",
|
||||
"AttrTrampoline",
|
||||
"AttrType",
|
||||
"AttrUpperBound",
|
||||
@@ -772,18 +827,23 @@ var stdlib = map[string][]string{
|
||||
"BoolType",
|
||||
"CharType",
|
||||
"Class",
|
||||
"ClassAddrPtr",
|
||||
"ClassAddress",
|
||||
"ClassBlock",
|
||||
"ClassConstant",
|
||||
"ClassExprLoc",
|
||||
"ClassFlag",
|
||||
"ClassLinePtr",
|
||||
"ClassLocList",
|
||||
"ClassLocListPtr",
|
||||
"ClassMacPtr",
|
||||
"ClassRangeListPtr",
|
||||
"ClassReference",
|
||||
"ClassReferenceAlt",
|
||||
"ClassReferenceSig",
|
||||
"ClassRngList",
|
||||
"ClassRngListsPtr",
|
||||
"ClassStrOffsetsPtr",
|
||||
"ClassString",
|
||||
"ClassStringAlt",
|
||||
"ClassUnknown",
|
||||
@@ -814,9 +874,13 @@ var stdlib = map[string][]string{
|
||||
"Tag",
|
||||
"TagAccessDeclaration",
|
||||
"TagArrayType",
|
||||
"TagAtomicType",
|
||||
"TagBaseType",
|
||||
"TagCallSite",
|
||||
"TagCallSiteParameter",
|
||||
"TagCatchDwarfBlock",
|
||||
"TagClassType",
|
||||
"TagCoarrayType",
|
||||
"TagCommonDwarfBlock",
|
||||
"TagCommonInclusion",
|
||||
"TagCompileUnit",
|
||||
@@ -824,12 +888,15 @@ var stdlib = map[string][]string{
|
||||
"TagConstType",
|
||||
"TagConstant",
|
||||
"TagDwarfProcedure",
|
||||
"TagDynamicType",
|
||||
"TagEntryPoint",
|
||||
"TagEnumerationType",
|
||||
"TagEnumerator",
|
||||
"TagFileType",
|
||||
"TagFormalParameter",
|
||||
"TagFriend",
|
||||
"TagGenericSubrange",
|
||||
"TagImmutableType",
|
||||
"TagImportedDeclaration",
|
||||
"TagImportedModule",
|
||||
"TagImportedUnit",
|
||||
@@ -853,6 +920,7 @@ var stdlib = map[string][]string{
|
||||
"TagRvalueReferenceType",
|
||||
"TagSetType",
|
||||
"TagSharedType",
|
||||
"TagSkeletonUnit",
|
||||
"TagStringType",
|
||||
"TagStructType",
|
||||
"TagSubprogram",
|
||||
@@ -2359,6 +2427,7 @@ var stdlib = map[string][]string{
|
||||
"RawValue",
|
||||
"StructuralError",
|
||||
"SyntaxError",
|
||||
"TagBMPString",
|
||||
"TagBitString",
|
||||
"TagBoolean",
|
||||
"TagEnum",
|
||||
@@ -2787,6 +2856,7 @@ var stdlib = map[string][]string{
|
||||
"IsPredeclared",
|
||||
"Mode",
|
||||
"New",
|
||||
"NewFromFiles",
|
||||
"Note",
|
||||
"Package",
|
||||
"PreserveAST",
|
||||
@@ -3115,6 +3185,11 @@ var stdlib = map[string][]string{
|
||||
"New64",
|
||||
"New64a",
|
||||
},
|
||||
"hash/maphash": []string{
|
||||
"Hash",
|
||||
"MakeSeed",
|
||||
"Seed",
|
||||
},
|
||||
"html": []string{
|
||||
"EscapeString",
|
||||
"UnescapeString",
|
||||
@@ -3367,6 +3442,7 @@ var stdlib = map[string][]string{
|
||||
"Ldate",
|
||||
"Llongfile",
|
||||
"Lmicroseconds",
|
||||
"Lmsgprefix",
|
||||
"Logger",
|
||||
"Lshortfile",
|
||||
"LstdFlags",
|
||||
@@ -3443,6 +3519,7 @@ var stdlib = map[string][]string{
|
||||
"Exp",
|
||||
"Exp2",
|
||||
"Expm1",
|
||||
"FMA",
|
||||
"Float32bits",
|
||||
"Float32frombits",
|
||||
"Float64bits",
|
||||
@@ -3567,6 +3644,9 @@ var stdlib = map[string][]string{
|
||||
"OnesCount32",
|
||||
"OnesCount64",
|
||||
"OnesCount8",
|
||||
"Rem",
|
||||
"Rem32",
|
||||
"Rem64",
|
||||
"Reverse",
|
||||
"Reverse16",
|
||||
"Reverse32",
|
||||
@@ -5140,7 +5220,10 @@ var stdlib = map[string][]string{
|
||||
"CTL_NET",
|
||||
"CTL_QUERY",
|
||||
"CTRL_BREAK_EVENT",
|
||||
"CTRL_CLOSE_EVENT",
|
||||
"CTRL_C_EVENT",
|
||||
"CTRL_LOGOFF_EVENT",
|
||||
"CTRL_SHUTDOWN_EVENT",
|
||||
"CancelIo",
|
||||
"CancelIoEx",
|
||||
"CertAddCertificateContextToStore",
|
||||
@@ -10112,6 +10195,7 @@ var stdlib = map[string][]string{
|
||||
"Duployan",
|
||||
"Egyptian_Hieroglyphs",
|
||||
"Elbasan",
|
||||
"Elymaic",
|
||||
"Ethiopic",
|
||||
"Extender",
|
||||
"FoldCategory",
|
||||
@@ -10215,6 +10299,7 @@ var stdlib = map[string][]string{
|
||||
"Myanmar",
|
||||
"N",
|
||||
"Nabataean",
|
||||
"Nandinagari",
|
||||
"Nd",
|
||||
"New_Tai_Lue",
|
||||
"Newa",
|
||||
@@ -10224,6 +10309,7 @@ var stdlib = map[string][]string{
|
||||
"Noncharacter_Code_Point",
|
||||
"Number",
|
||||
"Nushu",
|
||||
"Nyiakeng_Puachue_Hmong",
|
||||
"Ogham",
|
||||
"Ol_Chiki",
|
||||
"Old_Hungarian",
|
||||
@@ -10331,6 +10417,7 @@ var stdlib = map[string][]string{
|
||||
"Vai",
|
||||
"Variation_Selector",
|
||||
"Version",
|
||||
"Wancho",
|
||||
"Warang_Citi",
|
||||
"White_Space",
|
||||
"Yi",
|
||||
|
24
vendor/golang.org/x/tools/internal/packagesinternal/BUILD
generated
vendored
Normal file
@@ -0,0 +1,24 @@
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = ["packages.go"],
|
||||
importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/packagesinternal",
|
||||
importpath = "golang.org/x/tools/internal/packagesinternal",
|
||||
visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
|
||||
deps = ["//vendor/golang.org/x/tools/internal/gocommand:go_default_library"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "package-srcs",
|
||||
srcs = glob(["**"]),
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:private"],
|
||||
)
|
||||
|
||||
filegroup(
|
||||
name = "all-srcs",
|
||||
srcs = [":package-srcs"],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
14
vendor/golang.org/x/tools/internal/packagesinternal/packages.go
generated
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
// Package packagesinternal exposes internal-only fields from go/packages.
|
||||
package packagesinternal
|
||||
|
||||
import (
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
)
|
||||
|
||||
var GetForTest = func(p interface{}) string { return "" }
|
||||
|
||||
var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil }
|
||||
|
||||
var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {}
|
||||
|
||||
var TypecheckCgo int
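
Note: the new packagesinternal package exposes go/packages internals through package-level function variables that the owning package overwrites at init time; outside callers only ever see the variables. A collapsed single-file sketch of that hook pattern with made-up names (in the real code the default and the override live in different packages):

package main

import "fmt"

// Hook side: an exported variable with a harmless default.
var GetForTest = func(p interface{}) string { return "" }

// Owning-package side: it knows the concrete type and installs the real hook.
type pkg struct{ forTest string }

func init() {
	GetForTest = func(p interface{}) string { return p.(*pkg).forTest }
}

func main() {
	p := &pkg{forTest: "example.com/m"}
	fmt.Println(GetForTest(p)) // example.com/m
}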
|
100
vendor/golang.org/x/tools/internal/span/parse.go
generated
vendored
@@ -1,100 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package span
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Parse returns the location represented by the input.
|
||||
// All inputs are valid locations, as they can always be a pure filename.
|
||||
// The returned span will be normalized, and thus if printed may produce a
|
||||
// different string.
|
||||
func Parse(input string) Span {
|
||||
// :0:0#0-0:0#0
|
||||
valid := input
|
||||
var hold, offset int
|
||||
hadCol := false
|
||||
suf := rstripSuffix(input)
|
||||
if suf.sep == "#" {
|
||||
offset = suf.num
|
||||
suf = rstripSuffix(suf.remains)
|
||||
}
|
||||
if suf.sep == ":" {
|
||||
valid = suf.remains
|
||||
hold = suf.num
|
||||
hadCol = true
|
||||
suf = rstripSuffix(suf.remains)
|
||||
}
|
||||
switch {
|
||||
case suf.sep == ":":
|
||||
return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), Point{})
|
||||
case suf.sep == "-":
|
||||
// we have a span, fall out of the case to continue
|
||||
default:
|
||||
// separator not valid, rewind to either the : or the start
|
||||
return New(NewURI(valid), NewPoint(hold, 0, offset), Point{})
|
||||
}
|
||||
// only the span form can get here
|
||||
// at this point we still don't know what the numbers we have mean
|
||||
// if have not yet seen a : then we might have either a line or a column depending
|
||||
// on whether start has a column or not
|
||||
// we build an end point and will fix it later if needed
|
||||
end := NewPoint(suf.num, hold, offset)
|
||||
hold, offset = 0, 0
|
||||
suf = rstripSuffix(suf.remains)
|
||||
if suf.sep == "#" {
|
||||
offset = suf.num
|
||||
suf = rstripSuffix(suf.remains)
|
||||
}
|
||||
if suf.sep != ":" {
|
||||
// turns out we don't have a span after all, rewind
|
||||
return New(NewURI(valid), end, Point{})
|
||||
}
|
||||
valid = suf.remains
|
||||
hold = suf.num
|
||||
suf = rstripSuffix(suf.remains)
|
||||
if suf.sep != ":" {
|
||||
// line#offset only
|
||||
return New(NewURI(valid), NewPoint(hold, 0, offset), end)
|
||||
}
|
||||
// we have a column, so if end only had one number, it is also the column
|
||||
if !hadCol {
|
||||
end = NewPoint(suf.num, end.v.Line, end.v.Offset)
|
||||
}
|
||||
return New(NewURI(suf.remains), NewPoint(suf.num, hold, offset), end)
|
||||
}
|
||||
|
||||
type suffix struct {
|
||||
remains string
|
||||
sep string
|
||||
num int
|
||||
}
|
||||
|
||||
func rstripSuffix(input string) suffix {
|
||||
if len(input) == 0 {
|
||||
return suffix{"", "", -1}
|
||||
}
|
||||
remains := input
|
||||
num := -1
|
||||
// first see if we have a number at the end
|
||||
last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
|
||||
if last >= 0 && last < len(remains)-1 {
|
||||
number, err := strconv.ParseInt(remains[last+1:], 10, 64)
|
||||
if err == nil {
|
||||
num = int(number)
|
||||
remains = remains[:last+1]
|
||||
}
|
||||
}
|
||||
// now see if we have a trailing separator
|
||||
r, w := utf8.DecodeLastRuneInString(remains)
|
||||
if r != ':' && r != '#' && r == '#' {
|
||||
return suffix{input, "", -1}
|
||||
}
|
||||
remains = remains[:len(remains)-w]
|
||||
return suffix{remains, string(r), num}
|
||||
}
|
285
vendor/golang.org/x/tools/internal/span/span.go
generated
vendored
@@ -1,285 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package span contains support for representing with positions and ranges in
|
||||
// text files.
|
||||
package span
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"path"
|
||||
)
|
||||
|
||||
// Span represents a source code range in standardized form.
|
||||
type Span struct {
|
||||
v span
|
||||
}
|
||||
|
||||
// Point represents a single point within a file.
|
||||
// In general this should only be used as part of a Span, as on its own it
|
||||
// does not carry enough information.
|
||||
type Point struct {
|
||||
v point
|
||||
}
|
||||
|
||||
type span struct {
|
||||
URI URI `json:"uri"`
|
||||
Start point `json:"start"`
|
||||
End point `json:"end"`
|
||||
}
|
||||
|
||||
type point struct {
|
||||
Line int `json:"line"`
|
||||
Column int `json:"column"`
|
||||
Offset int `json:"offset"`
|
||||
}
|
||||
|
||||
// Invalid is a span that reports false from IsValid
|
||||
var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}
|
||||
|
||||
var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}
|
||||
|
||||
// Converter is the interface to an object that can convert between line:column
|
||||
// and offset forms for a single file.
|
||||
type Converter interface {
|
||||
//ToPosition converts from an offset to a line:column pair.
|
||||
ToPosition(offset int) (int, int, error)
|
||||
//ToOffset converts from a line:column pair to an offset.
|
||||
ToOffset(line, col int) (int, error)
|
||||
}
|
||||
|
||||
func New(uri URI, start Point, end Point) Span {
|
||||
s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
|
||||
s.v.clean()
|
||||
return s
|
||||
}
|
||||
|
||||
func NewPoint(line, col, offset int) Point {
|
||||
p := Point{v: point{Line: line, Column: col, Offset: offset}}
|
||||
p.v.clean()
|
||||
return p
|
||||
}
|
||||
|
||||
func Compare(a, b Span) int {
|
||||
if r := CompareURI(a.URI(), b.URI()); r != 0 {
|
||||
return r
|
||||
}
|
||||
if r := comparePoint(a.v.Start, b.v.Start); r != 0 {
|
||||
return r
|
||||
}
|
||||
return comparePoint(a.v.End, b.v.End)
|
||||
}
|
||||
|
||||
func ComparePoint(a, b Point) int {
|
||||
return comparePoint(a.v, b.v)
|
||||
}
|
||||
|
||||
func comparePoint(a, b point) int {
|
||||
if !a.hasPosition() {
|
||||
if a.Offset < b.Offset {
|
||||
return -1
|
||||
}
|
||||
if a.Offset > b.Offset {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
if a.Line < b.Line {
|
||||
return -1
|
||||
}
|
||||
if a.Line > b.Line {
|
||||
return 1
|
||||
}
|
||||
if a.Column < b.Column {
|
||||
return -1
|
||||
}
|
||||
if a.Column > b.Column {
|
||||
return 1
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (s Span) HasPosition() bool { return s.v.Start.hasPosition() }
|
||||
func (s Span) HasOffset() bool { return s.v.Start.hasOffset() }
|
||||
func (s Span) IsValid() bool { return s.v.Start.isValid() }
|
||||
func (s Span) IsPoint() bool { return s.v.Start == s.v.End }
|
||||
func (s Span) URI() URI { return s.v.URI }
|
||||
func (s Span) Start() Point { return Point{s.v.Start} }
|
||||
func (s Span) End() Point { return Point{s.v.End} }
|
||||
func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
|
||||
func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }
|
||||
|
||||
func (p Point) HasPosition() bool { return p.v.hasPosition() }
|
||||
func (p Point) HasOffset() bool { return p.v.hasOffset() }
|
||||
func (p Point) IsValid() bool { return p.v.isValid() }
|
||||
func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
|
||||
func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
|
||||
func (p Point) Line() int {
|
||||
if !p.v.hasPosition() {
|
||||
panic(fmt.Errorf("position not set in %v", p.v))
|
||||
}
|
||||
return p.v.Line
|
||||
}
|
||||
func (p Point) Column() int {
|
||||
if !p.v.hasPosition() {
|
||||
panic(fmt.Errorf("position not set in %v", p.v))
|
||||
}
|
||||
return p.v.Column
|
||||
}
|
||||
func (p Point) Offset() int {
|
||||
if !p.v.hasOffset() {
|
||||
panic(fmt.Errorf("offset not set in %v", p.v))
|
||||
}
|
||||
return p.v.Offset
|
||||
}
|
||||
|
||||
func (p point) hasPosition() bool { return p.Line > 0 }
|
||||
func (p point) hasOffset() bool { return p.Offset >= 0 }
|
||||
func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() }
|
||||
func (p point) isZero() bool {
|
||||
return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
|
||||
}
|
||||
|
||||
func (s *span) clean() {
|
||||
//this presumes the points are already clean
|
||||
if !s.End.isValid() || (s.End == point{}) {
|
||||
s.End = s.Start
|
||||
}
|
||||
}
|
||||
|
||||
func (p *point) clean() {
|
||||
if p.Line < 0 {
|
||||
p.Line = 0
|
||||
}
|
||||
if p.Column <= 0 {
|
||||
if p.Line > 0 {
|
||||
p.Column = 1
|
||||
} else {
|
||||
p.Column = 0
|
||||
}
|
||||
}
|
||||
if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
|
||||
p.Offset = -1
|
||||
}
|
||||
}
|
||||
|
||||
// Format implements fmt.Formatter to print the Location in a standard form.
|
||||
// The format produced is one that can be read back in using Parse.
|
||||
func (s Span) Format(f fmt.State, c rune) {
|
||||
fullForm := f.Flag('+')
|
||||
preferOffset := f.Flag('#')
|
||||
// we should always have a uri, simplify if it is file format
|
||||
//TODO: make sure the end of the uri is unambiguous
|
||||
uri := string(s.v.URI)
|
||||
if c == 'f' {
|
||||
uri = path.Base(uri)
|
||||
} else if !fullForm {
|
||||
uri = s.v.URI.Filename()
|
||||
}
|
||||
fmt.Fprint(f, uri)
|
||||
if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
|
||||
return
|
||||
}
|
||||
// see which bits of start to write
|
||||
printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
|
||||
printLine := s.HasPosition() && (fullForm || !printOffset)
|
||||
printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
|
||||
fmt.Fprint(f, ":")
|
||||
if printLine {
|
||||
fmt.Fprintf(f, "%d", s.v.Start.Line)
|
||||
}
|
||||
if printColumn {
|
||||
fmt.Fprintf(f, ":%d", s.v.Start.Column)
|
||||
}
|
||||
if printOffset {
|
||||
fmt.Fprintf(f, "#%d", s.v.Start.Offset)
|
||||
}
|
||||
// start is written, do we need end?
|
||||
if s.IsPoint() {
|
||||
return
|
||||
}
|
||||
// we don't print the line if it did not change
|
||||
printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
|
||||
fmt.Fprint(f, "-")
|
||||
if printLine {
|
||||
fmt.Fprintf(f, "%d", s.v.End.Line)
|
||||
}
|
||||
if printColumn {
|
||||
if printLine {
|
||||
fmt.Fprint(f, ":")
|
||||
}
|
||||
fmt.Fprintf(f, "%d", s.v.End.Column)
|
||||
}
|
||||
if printOffset {
|
||||
fmt.Fprintf(f, "#%d", s.v.End.Offset)
|
||||
}
|
||||
}
|
||||
|
||||
func (s Span) WithPosition(c Converter) (Span, error) {
|
||||
if err := s.update(c, true, false); err != nil {
|
||||
return Span{}, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s Span) WithOffset(c Converter) (Span, error) {
|
||||
if err := s.update(c, false, true); err != nil {
|
||||
return Span{}, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s Span) WithAll(c Converter) (Span, error) {
|
||||
if err := s.update(c, true, true); err != nil {
|
||||
return Span{}, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *Span) update(c Converter, withPos, withOffset bool) error {
|
||||
if !s.IsValid() {
|
||||
return fmt.Errorf("cannot add information to an invalid span")
|
||||
}
|
||||
if withPos && !s.HasPosition() {
|
||||
if err := s.v.Start.updatePosition(c); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.v.End.Offset == s.v.Start.Offset {
|
||||
s.v.End = s.v.Start
|
||||
} else if err := s.v.End.updatePosition(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) {
|
||||
if err := s.v.Start.updateOffset(c); err != nil {
|
||||
return err
|
||||
}
|
||||
if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column {
|
||||
s.v.End.Offset = s.v.Start.Offset
|
||||
} else if err := s.v.End.updateOffset(c); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *point) updatePosition(c Converter) error {
|
||||
line, col, err := c.ToPosition(p.Offset)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Line = line
|
||||
p.Column = col
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *point) updateOffset(c Converter) error {
|
||||
offset, err := c.ToOffset(p.Line, p.Column)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.Offset = offset
|
||||
return nil
|
||||
}
|
151
vendor/golang.org/x/tools/internal/span/token.go
generated
vendored
@@ -1,151 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
    "fmt"
    "go/token"
)

// Range represents a source code range in token.Pos form.
// It also carries the FileSet that produced the positions, so that it is
// self contained.
type Range struct {
    FileSet *token.FileSet
    Start   token.Pos
    End     token.Pos
}

// TokenConverter is a Converter backed by a token file set and file.
// It uses the file set methods to work out the conversions, which
// makes it fast and does not require the file contents.
type TokenConverter struct {
    fset *token.FileSet
    file *token.File
}

// NewRange creates a new Range from a FileSet and two positions.
// To represent a point pass a 0 as the end pos.
func NewRange(fset *token.FileSet, start, end token.Pos) Range {
    return Range{
        FileSet: fset,
        Start:   start,
        End:     end,
    }
}

// NewTokenConverter returns an implementation of Converter backed by a
// token.File.
func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter {
    return &TokenConverter{fset: fset, file: f}
}

// NewContentConverter returns an implementation of Converter for the
// given file content.
func NewContentConverter(filename string, content []byte) *TokenConverter {
    fset := token.NewFileSet()
    f := fset.AddFile(filename, -1, len(content))
    f.SetLinesForContent(content)
    return &TokenConverter{fset: fset, file: f}
}

// IsPoint returns true if the range represents a single point.
func (r Range) IsPoint() bool {
    return r.Start == r.End
}

// Span converts a Range to a Span that represents the Range.
// It will fill in all the members of the Span, calculating the line and column
// information.
func (r Range) Span() (Span, error) {
    f := r.FileSet.File(r.Start)
    if f == nil {
        return Span{}, fmt.Errorf("file not found in FileSet")
    }
    s := Span{v: span{URI: FileURI(f.Name())}}
    var err error
    s.v.Start.Offset, err = offset(f, r.Start)
    if err != nil {
        return Span{}, err
    }
    if r.End.IsValid() {
        s.v.End.Offset, err = offset(f, r.End)
        if err != nil {
            return Span{}, err
        }
    }
    s.v.Start.clean()
    s.v.End.clean()
    s.v.clean()
    converter := NewTokenConverter(r.FileSet, f)
    return s.WithPosition(converter)
}

// offset is a copy of the Offset function in go/token, but with the adjustment
// that it does not panic on invalid positions.
func offset(f *token.File, pos token.Pos) (int, error) {
    if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() {
        return 0, fmt.Errorf("invalid pos")
    }
    return int(pos) - f.Base(), nil
}

// Range converts a Span to a Range that represents the Span for the supplied
// File.
func (s Span) Range(converter *TokenConverter) (Range, error) {
    s, err := s.WithOffset(converter)
    if err != nil {
        return Range{}, err
    }
    // go/token will panic if the offset is larger than the file's size,
    // so check here to avoid panicking.
    if s.Start().Offset() > converter.file.Size() {
        return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size())
    }
    if s.End().Offset() > converter.file.Size() {
        return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size())
    }
    return Range{
        FileSet: converter.fset,
        Start:   converter.file.Pos(s.Start().Offset()),
        End:     converter.file.Pos(s.End().Offset()),
    }, nil
}

func (l *TokenConverter) ToPosition(offset int) (int, int, error) {
    if offset > l.file.Size() {
        return 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, l.file.Size())
    }
    pos := l.file.Pos(offset)
    p := l.fset.Position(pos)
    if offset == l.file.Size() {
        return p.Line + 1, 1, nil
    }
    return p.Line, p.Column, nil
}

func (l *TokenConverter) ToOffset(line, col int) (int, error) {
    if line < 0 {
        return -1, fmt.Errorf("line is not valid")
    }
    lineMax := l.file.LineCount() + 1
    if line > lineMax {
        return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
    } else if line == lineMax {
        if col > 1 {
            return -1, fmt.Errorf("column is beyond end of file")
        }
        // at the end of the file, allowing for a trailing eol
        return l.file.Size(), nil
    }
    pos := lineStart(l.file, line)
    if !pos.IsValid() {
        return -1, fmt.Errorf("line is not in file")
    }
    // we assume that column is in bytes here, and that the first byte of a
    // line is at column 1
    pos += token.Pos(col - 1)
    return offset(l.file, pos)
}
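The TokenConverter deleted above maps byte offsets to line/column pairs and back via its ToPosition and ToOffset methods. A minimal usage sketch, assuming a made-up file and content (golang.org/x/tools/internal/span is an internal package, so outside x/tools this is illustrative only):

package main

import (
    "fmt"

    "golang.org/x/tools/internal/span" // internal: importable only from within x/tools
)

func main() {
    content := []byte("package main\n\nfunc main() {}\n")

    // NewContentConverter builds the token.FileSet and line table internally.
    c := span.NewContentConverter("main.go", content)

    // Byte offset 14 is the 'f' of "func" at the start of line 3.
    line, col, err := c.ToPosition(14)
    if err != nil {
        panic(err)
    }
    fmt.Println(line, col) // 3 1

    // And back again, from line/column to byte offset.
    off, err := c.ToOffset(line, col)
    if err != nil {
        panic(err)
    }
    fmt.Println(off) // 14
}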
39 vendor/golang.org/x/tools/internal/span/token111.go generated vendored
@@ -1,39 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !go1.12

package span

import (
    "go/token"
)

// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go
// versions <= 1.11, we borrow logic from the analysisutil package.
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
    // Use binary search to find the start offset of this line.

    min := 0        // inclusive
    max := f.Size() // exclusive
    for {
        offset := (min + max) / 2
        pos := f.Pos(offset)
        posn := f.Position(pos)
        if posn.Line == line {
            return pos - (token.Pos(posn.Column) - 1)
        }

        if min+1 >= max {
            return token.NoPos
        }

        if posn.Line < line {
            min = offset
        } else {
            max = offset
        }
    }
}
16 vendor/golang.org/x/tools/internal/span/token112.go generated vendored
@@ -1,16 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build go1.12

package span

import (
    "go/token"
)

// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
    return f.LineStart(line)
}
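The two files above exist only because (*token.File).LineStart was added in Go 1.12: token111.go reimplements it with a binary search over offsets, and token112.go simply forwards to it. A standard-library-only sketch of the Go 1.12+ call (the file name and content are invented for illustration):

package main

import (
    "fmt"
    "go/token"
)

func main() {
    content := []byte("line one\nline two\nline three\n")
    fset := token.NewFileSet()
    f := fset.AddFile("example.txt", -1, len(content))
    f.SetLinesForContent(content)

    // LineStart returns the position of the first byte of line 2,
    // which is what the binary-search fallback above computes by hand.
    pos := f.LineStart(2)
    fmt.Println(fset.Position(pos)) // example.txt:2:1
}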
152 vendor/golang.org/x/tools/internal/span/uri.go generated vendored
@@ -1,152 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
    "fmt"
    "net/url"
    "os"
    "path"
    "path/filepath"
    "runtime"
    "strings"
    "unicode"
)

const fileScheme = "file"

// URI represents the full URI for a file.
type URI string

// Filename returns the file path for the given URI.
// It is an error to call this on a URI that is not a valid filename.
func (uri URI) Filename() string {
    filename, err := filename(uri)
    if err != nil {
        panic(err)
    }
    return filepath.FromSlash(filename)
}

func filename(uri URI) (string, error) {
    if uri == "" {
        return "", nil
    }
    u, err := url.ParseRequestURI(string(uri))
    if err != nil {
        return "", err
    }
    if u.Scheme != fileScheme {
        return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
    }
    if isWindowsDriveURI(u.Path) {
        u.Path = u.Path[1:]
    }
    return u.Path, nil
}

// NewURI returns a span URI for the string.
// It will attempt to detect if the string is a file path or uri.
func NewURI(s string) URI {
    if u, err := url.PathUnescape(s); err == nil {
        s = u
    }
    if strings.HasPrefix(s, fileScheme+"://") {
        return URI(s)
    }
    return FileURI(s)
}

func CompareURI(a, b URI) int {
    if equalURI(a, b) {
        return 0
    }
    if a < b {
        return -1
    }
    return 1
}

func equalURI(a, b URI) bool {
    if a == b {
        return true
    }
    // If we have the same URI basename, we may still have the same file URIs.
    if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) {
        return false
    }
    fa, err := filename(a)
    if err != nil {
        return false
    }
    fb, err := filename(b)
    if err != nil {
        return false
    }
    // Stat the files to check if they are equal.
    infoa, err := os.Stat(filepath.FromSlash(fa))
    if err != nil {
        return false
    }
    infob, err := os.Stat(filepath.FromSlash(fb))
    if err != nil {
        return false
    }
    return os.SameFile(infoa, infob)
}

// FileURI returns a span URI for the supplied file path.
// It will always have the file scheme.
func FileURI(path string) URI {
    if path == "" {
        return ""
    }
    // Handle standard library paths that contain the literal "$GOROOT".
    // TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
    const prefix = "$GOROOT"
    if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
        suffix := path[len(prefix):]
        path = runtime.GOROOT() + suffix
    }
    if !isWindowsDrivePath(path) {
        if abs, err := filepath.Abs(path); err == nil {
            path = abs
        }
    }
    // Check the file path again, in case it became absolute.
    if isWindowsDrivePath(path) {
        path = "/" + path
    }
    path = filepath.ToSlash(path)
    u := url.URL{
        Scheme: fileScheme,
        Path:   path,
    }
    uri := u.String()
    if unescaped, err := url.PathUnescape(uri); err == nil {
        uri = unescaped
    }
    return URI(uri)
}

// isWindowsDrivePath returns true if the file path is of the form used by
// Windows. We check if the path begins with a drive letter, followed by a ":".
func isWindowsDrivePath(path string) bool {
    if len(path) < 4 {
        return false
    }
    return unicode.IsLetter(rune(path[0])) && path[1] == ':'
}

// isWindowsDriveURI returns true if the file URI is of the format used by
// Windows URIs. The url.Parse package does not specially handle Windows paths
// (see https://golang.org/issue/6027). We check if the URI path has
// a drive prefix (e.g. "/C:"). If so, we trim the leading "/".
func isWindowsDriveURI(uri string) bool {
    if len(uri) < 4 {
        return false
    }
    return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
}
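FileURI, NewURI, and Filename above round-trip between OS file paths and file:// URIs, with special handling for Windows drive letters and the literal "$GOROOT". A rough usage sketch, assuming a Unix-style path (illustrative only, since the package is internal to x/tools):

package main

import (
    "fmt"

    "golang.org/x/tools/internal/span" // internal: importable only from within x/tools
)

func main() {
    // FileURI makes the path absolute and attaches the file scheme.
    uri := span.FileURI("/tmp/example/main.go")
    fmt.Println(uri) // file:///tmp/example/main.go on a Unix-like system

    // Filename maps the URI back to an OS file path.
    fmt.Println(uri.Filename()) // /tmp/example/main.go

    // NewURI accepts either form and normalizes to a URI.
    fmt.Println(span.NewURI("file:///tmp/example/main.go") == uri) // true
}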
94 vendor/golang.org/x/tools/internal/span/utf16.go generated vendored
@@ -1,94 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package span

import (
    "fmt"
    "unicode/utf16"
    "unicode/utf8"
)

// ToUTF16Column calculates the utf16 column expressed by the point given the
// supplied file contents.
// This is used to convert from the native (always in bytes) column
// representation and the utf16 counts used by some editors.
func ToUTF16Column(p Point, content []byte) (int, error) {
    if content == nil {
        return -1, fmt.Errorf("ToUTF16Column: missing content")
    }
    if !p.HasPosition() {
        return -1, fmt.Errorf("ToUTF16Column: point is missing position")
    }
    if !p.HasOffset() {
        return -1, fmt.Errorf("ToUTF16Column: point is missing offset")
    }
    offset := p.Offset()      // 0-based
    colZero := p.Column() - 1 // 0-based
    if colZero == 0 {
        // 0-based column 0, so it must be chr 1
        return 1, nil
    } else if colZero < 0 {
        return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero)
    }
    // work out the offset at the start of the line using the column
    lineOffset := offset - colZero
    if lineOffset < 0 || offset > len(content) {
        return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content))
    }
    // Use the offset to pick out the line start.
    // This cannot panic: offset > len(content) and lineOffset < offset.
    start := content[lineOffset:]

    // Now, truncate down to the supplied column.
    start = start[:colZero]

    // and count the number of utf16 characters
    // in theory we could do this by hand more efficiently...
    return len(utf16.Encode([]rune(string(start)))) + 1, nil
}

// FromUTF16Column advances the point by the utf16 character offset given the
// supplied line contents.
// This is used to convert from the utf16 counts used by some editors to the
// native (always in bytes) column representation.
func FromUTF16Column(p Point, chr int, content []byte) (Point, error) {
    if !p.HasOffset() {
        return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset")
    }
    // if chr is 1 then no adjustment needed
    if chr <= 1 {
        return p, nil
    }
    if p.Offset() >= len(content) {
        return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content))
    }
    remains := content[p.Offset():]
    // scan forward the specified number of characters
    for count := 1; count < chr; count++ {
        if len(remains) <= 0 {
            return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content")
        }
        r, w := utf8.DecodeRune(remains)
        if r == '\n' {
            // Per the LSP spec:
            //
            // > If the character value is greater than the line length it
            // > defaults back to the line length.
            break
        }
        remains = remains[w:]
        if r >= 0x10000 {
            // a two point rune
            count++
            // if we finished in a two point rune, do not advance past the first
            if count >= chr {
                break
            }
        }
        p.v.Column += w
        p.v.Offset += w
    }
    return p, nil
}
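The heart of ToUTF16Column above is len(utf16.Encode([]rune(string(start)))) + 1: the bytes of the line up to the column are re-counted as UTF-16 code units, so a rune outside the Basic Multilingual Plane counts as two. A standalone sketch of that arithmetic using only the standard library (the example string is invented):

package main

import (
    "fmt"
    "unicode/utf16"
)

func main() {
    // "a𐐀b": 'a' is 1 byte, '𐐀' (U+10400) is 4 bytes in UTF-8 but
    // 2 code units in UTF-16, and 'b' follows.
    line := "a𐐀b"
    prefix := line[:5] // bytes before 'b'

    byteCol := len(prefix) + 1                        // 6, the native (byte) column
    utf16Col := len(utf16.Encode([]rune(prefix))) + 1 // 4, what UTF-16-based editors expect
    fmt.Println(byteCol, utf16Col)
}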
@@ -2,11 +2,10 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["module.go"],
    importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/module",
    importpath = "golang.org/x/tools/internal/module",
    srcs = ["types.go"],
    importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/internal/typesinternal",
    importpath = "golang.org/x/tools/internal/typesinternal",
    visibility = ["//vendor/golang.org/x/tools:__subpackages__"],
    deps = ["//vendor/golang.org/x/tools/internal/semver:go_default_library"],
)

filegroup(
28 vendor/golang.org/x/tools/internal/typesinternal/types.go generated vendored Normal file
@@ -0,0 +1,28 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package typesinternal

import (
    "go/types"
    "reflect"
    "unsafe"
)

func SetUsesCgo(conf *types.Config) bool {
    v := reflect.ValueOf(conf).Elem()

    f := v.FieldByName("go115UsesCgo")
    if !f.IsValid() {
        f = v.FieldByName("UsesCgo")
        if !f.IsValid() {
            return false
        }
    }

    addr := unsafe.Pointer(f.UnsafeAddr())
    *(*bool)(addr) = true

    return true
}
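SetUsesCgo above flips an unexported cgo flag on types.Config through reflect and unsafe, returning false when neither go115UsesCgo nor UsesCgo exists in the running Go version. A hedged usage sketch (illustrative only; typesinternal is internal to x/tools):

package main

import (
    "fmt"
    "go/types"

    "golang.org/x/tools/internal/typesinternal" // internal: importable only from within x/tools
)

func main() {
    conf := &types.Config{}
    if typesinternal.SetUsesCgo(conf) {
        fmt.Println("cgo-aware type checking enabled")
    } else {
        fmt.Println("this go/types version exposes no cgo flag")
    }
}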