Update continuity, go-winio and hcsshim
Update dependencies and remove the local bindfilter files. Those have been moved to go-winio.

Signed-off-by: Gabriel Adrian Samfira <gsamfira@cloudbasesolutions.com>
34 changes: vendor/golang.org/x/tools/go/packages/packages.go (generated, vendored)
@@ -878,12 +878,19 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 	// never has to create a types.Package for an indirect dependency,
 	// which would then require that such created packages be explicitly
 	// inserted back into the Import graph as a final step after export data loading.
 	// (Hence this return is after the Types assignment.)
 	// The Diamond test exercises this case.
 	if !lpkg.needtypes && !lpkg.needsrc {
 		return
 	}
 	if !lpkg.needsrc {
-		ld.loadFromExportData(lpkg)
+		if err := ld.loadFromExportData(lpkg); err != nil {
+			lpkg.Errors = append(lpkg.Errors, Error{
+				Pos:  "-",
+				Msg:  err.Error(),
+				Kind: UnknownError, // e.g. can't find/open/parse export data
+			})
+		}
 		return // not a source package, don't get syntax trees
 	}

@@ -970,7 +977,8 @@ func (ld *loader) loadPackage(lpkg *loaderPackage) {
 		// The config requested loading sources and types, but sources are missing.
 		// Add an error to the package and fall back to loading from export data.
 		appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
-		ld.loadFromExportData(lpkg)
+		_ = ld.loadFromExportData(lpkg) // ignore any secondary errors

 		return // can't get syntax trees for this package
 	}

@@ -1194,9 +1202,10 @@ func sameFile(x, y string) bool {
 	return false
 }

-// loadFromExportData returns type information for the specified
+// loadFromExportData ensures that type information is present for the specified
 // package, loading it from an export data file on the first request.
-func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
+// On success it sets lpkg.Types to a new Package.
+func (ld *loader) loadFromExportData(lpkg *loaderPackage) error {
 	if lpkg.PkgPath == "" {
 		log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
 	}

@@ -1207,8 +1216,8 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
 	// must be sequential. (Finer-grained locking would require
 	// changes to the gcexportdata API.)
 	//
-	// The exportMu lock guards the Package.Pkg field and the
-	// types.Package it points to, for each Package in the graph.
+	// The exportMu lock guards the lpkg.Types field and the
+	// types.Package it points to, for each loaderPackage in the graph.
 	//
 	// Not all accesses to Package.Pkg need to be protected by exportMu:
 	// graph ordering ensures that direct dependencies of source

@@ -1217,18 +1226,18 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
 	defer ld.exportMu.Unlock()

 	if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
-		return tpkg, nil // cache hit
+		return nil // cache hit
 	}

 	lpkg.IllTyped = true // fail safe

 	if lpkg.ExportFile == "" {
 		// Errors while building export data will have been printed to stderr.
-		return nil, fmt.Errorf("no export data file")
+		return fmt.Errorf("no export data file")
 	}
 	f, err := os.Open(lpkg.ExportFile)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	defer f.Close()

@@ -1240,7 +1249,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
 	// queries.)
 	r, err := gcexportdata.NewReader(f)
 	if err != nil {
-		return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+		return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
 	}

 	// Build the view.

@@ -1284,7 +1293,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error
 	// (May modify incomplete packages in view but not create new ones.)
 	tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
 	if err != nil {
-		return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
+		return fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
 	}
 	if _, ok := view["go.shape"]; ok {
 		// Account for the pseudopackage "go.shape" that gets

@@ -1297,8 +1306,7 @@ func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error

 	lpkg.Types = tpkg
 	lpkg.IllTyped = false

-	return tpkg, nil
+	return nil
 }

 // impliedLoadMode returns loadMode with its dependencies.
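The practical effect of these packages.go hunks is that export-data failures are now recorded on the package instead of being silently discarded. A minimal sketch of how a caller of the public golang.org/x/tools/go/packages API would observe them (the pattern "fmt" is an arbitrary example):

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	// NeedTypes forces type information, which may come from export data.
	cfg := &packages.Config{Mode: packages.NeedTypes}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		panic(err) // Load itself failed; per-package problems land in pkg.Errors
	}
	for _, pkg := range pkgs {
		// With this change, a missing or unreadable export data file
		// shows up here as an Error with Kind UnknownError.
		for _, e := range pkg.Errors {
			fmt.Println(e)
		}
	}
}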
128 changes: vendor/golang.org/x/tools/internal/gcimporter/iexport.go (generated, vendored)
@@ -22,6 +22,7 @@ import (
 	"strconv"
 	"strings"

+	"golang.org/x/tools/internal/tokeninternal"
 	"golang.org/x/tools/internal/typeparams"
 )

@@ -138,6 +139,17 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, ver
 		p.doDecl(p.declTodo.popHead())
 	}

+	// Produce index of offset of each file record in files.
+	var files intWriter
+	var fileOffset []uint64 // fileOffset[i] is offset in files of file encoded as i
+	if p.shallow {
+		fileOffset = make([]uint64, len(p.fileInfos))
+		for i, info := range p.fileInfos {
+			fileOffset[i] = uint64(files.Len())
+			p.encodeFile(&files, info.file, info.needed)
+		}
+	}
+
 	// Append indices to data0 section.
 	dataLen := uint64(p.data0.Len())
 	w := p.newWriter()

@@ -163,16 +175,75 @@ func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, ver
 	}
 	hdr.uint64(uint64(p.version))
 	hdr.uint64(uint64(p.strings.Len()))
+	if p.shallow {
+		hdr.uint64(uint64(files.Len()))
+		hdr.uint64(uint64(len(fileOffset)))
+		for _, offset := range fileOffset {
+			hdr.uint64(offset)
+		}
+	}
 	hdr.uint64(dataLen)

 	// Flush output.
 	io.Copy(out, &hdr)
 	io.Copy(out, &p.strings)
+	if p.shallow {
+		io.Copy(out, &files)
+	}
 	io.Copy(out, &p.data0)

 	return nil
 }

+// encodeFile writes to w a representation of the file sufficient to
+// faithfully restore position information about all needed offsets.
+// Mutates the needed array.
+func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64) {
+	_ = needed[0] // precondition: needed is non-empty
+
+	w.uint64(p.stringOff(file.Name()))
+
+	size := uint64(file.Size())
+	w.uint64(size)
+
+	// Sort the set of needed offsets. Duplicates are harmless.
+	sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
+
+	lines := tokeninternal.GetLines(file) // byte offset of each line start
+	w.uint64(uint64(len(lines)))
+
+	// Rather than record the entire array of line start offsets,
+	// we save only a sparse list of (index, offset) pairs for
+	// the start of each line that contains a needed position.
+	var sparse [][2]int // (index, offset) pairs
+outer:
+	for i, lineStart := range lines {
+		lineEnd := size
+		if i < len(lines)-1 {
+			lineEnd = uint64(lines[i+1])
+		}
+		// Does this line contain a needed offset?
+		if needed[0] < lineEnd {
+			sparse = append(sparse, [2]int{i, lineStart})
+			for needed[0] < lineEnd {
+				needed = needed[1:]
+				if len(needed) == 0 {
+					break outer
+				}
+			}
+		}
+	}
+
+	// Delta-encode the columns.
+	w.uint64(uint64(len(sparse)))
+	var prev [2]int
+	for _, pair := range sparse {
+		w.uint64(uint64(pair[0] - prev[0]))
+		w.uint64(uint64(pair[1] - prev[1]))
+		prev = pair
+	}
+}
+
 // writeIndex writes out an object index. mainIndex indicates whether
 // we're writing out the main index, which is also read by
 // non-compiler tools and includes a complete package description
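The encodeFile hunk above trades the full line-start table for a sparse, delta-encoded list of (index, offset) pairs covering only lines that contain a needed position. The following standalone sketch (illustrative, not the vendored code) isolates that selection step:

package main

import (
	"fmt"
	"sort"
)

// sparseLineStarts mirrors the idea in encodeFile: given all line-start
// offsets of a file and the set of byte offsets the importer will need,
// keep only the (lineIndex, lineStart) pairs for lines that contain a
// needed offset.
func sparseLineStarts(lines []int, size int, needed []int) [][2]int {
	sort.Ints(needed)
	var sparse [][2]int
	for i, lineStart := range lines {
		lineEnd := size
		if i < len(lines)-1 {
			lineEnd = lines[i+1]
		}
		if len(needed) > 0 && needed[0] < lineEnd {
			sparse = append(sparse, [2]int{i, lineStart})
			for len(needed) > 0 && needed[0] < lineEnd {
				needed = needed[1:] // consume all needed offsets on this line
			}
		}
	}
	return sparse
}

func main() {
	lines := []int{0, 10, 25, 40, 60} // line-start offsets of a 70-byte file
	needed := []int{12, 13, 62}       // positions the importer will ask about
	// Only lines 1 and 4 contain needed offsets, so 5 entries shrink to 2.
	fmt.Println(sparseLineStarts(lines, 70, needed)) // [[1 10] [4 60]]
}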
@@ -255,6 +326,12 @@ type iexporter struct {
 	strings     intWriter
 	stringIndex map[string]uint64

+	// In shallow mode, object positions are encoded as (file, offset).
+	// Each file is recorded as a line-number table.
+	// Only the lines of needed positions are saved faithfully.
+	fileInfo  map[*token.File]uint64 // value is index in fileInfos
+	fileInfos []*filePositions
+
 	data0       intWriter
 	declIndex   map[types.Object]uint64
 	tparamNames map[types.Object]string // typeparam->exported name

@@ -263,6 +340,11 @@ type iexporter struct {
 	indent int // for tracing support
 }

+type filePositions struct {
+	file   *token.File
+	needed []uint64 // unordered list of needed file offsets
+}
+
 func (p *iexporter) trace(format string, args ...interface{}) {
 	if !trace {
 		// Call sites should also be guarded, but having this check here allows

@@ -286,6 +368,25 @@ func (p *iexporter) stringOff(s string) uint64 {
 	return off
 }

+// fileIndexAndOffset returns the index of the token.File and the byte offset of pos within it.
+func (p *iexporter) fileIndexAndOffset(file *token.File, pos token.Pos) (uint64, uint64) {
+	index, ok := p.fileInfo[file]
+	if !ok {
+		index = uint64(len(p.fileInfo))
+		p.fileInfos = append(p.fileInfos, &filePositions{file: file})
+		if p.fileInfo == nil {
+			p.fileInfo = make(map[*token.File]uint64)
+		}
+		p.fileInfo[file] = index
+	}
+	// Record each needed offset.
+	info := p.fileInfos[index]
+	offset := uint64(file.Offset(pos))
+	info.needed = append(info.needed, offset)
+
+	return index, offset
+}
+
 // pushDecl adds n to the declaration work queue, if not already present.
 func (p *iexporter) pushDecl(obj types.Object) {
 	// Package unsafe is known to the compiler and predeclared.

@@ -346,7 +447,13 @@ func (p *iexporter) doDecl(obj types.Object) {
 	case *types.Func:
 		sig, _ := obj.Type().(*types.Signature)
 		if sig.Recv() != nil {
-			panic(internalErrorf("unexpected method: %v", sig))
+			// We shouldn't see methods in the package scope,
+			// but the type checker may repair "func () F() {}"
+			// to "func (Invalid) F()" and then treat it like "func F()",
+			// so allow that. See golang/go#57729.
+			if sig.Recv().Type() != types.Typ[types.Invalid] {
+				panic(internalErrorf("unexpected method: %v", sig))
+			}
 		}

 		// Function.
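fileIndexAndOffset follows a common Go interning idiom: a map from value to small index plus a parallel slice of records, with the map allocated lazily on first use. A stripped-down sketch of the same pattern, with illustrative names and string keys standing in for *token.File:

package main

import "fmt"

// interner assigns a stable small-integer index to each distinct key,
// appending a record to the parallel slice on first sight, the same
// shape fileIndexAndOffset uses for *token.File values.
type interner struct {
	index   map[string]int
	records []string
}

func (in *interner) intern(key string) int {
	idx, ok := in.index[key]
	if !ok {
		idx = len(in.index) // next free index; len of a nil map is 0
		in.records = append(in.records, key)
		if in.index == nil {
			in.index = make(map[string]int)
		}
		in.index[key] = idx
	}
	return idx
}

func main() {
	var in interner
	fmt.Println(in.intern("a.go"), in.intern("b.go"), in.intern("a.go")) // 0 1 0
	fmt.Println(in.records)                                              // [a.go b.go]
}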
@@ -458,13 +565,30 @@ func (w *exportWriter) tag(tag byte) {
 }

 func (w *exportWriter) pos(pos token.Pos) {
-	if w.p.version >= iexportVersionPosCol {
+	if w.p.shallow {
+		w.posV2(pos)
+	} else if w.p.version >= iexportVersionPosCol {
 		w.posV1(pos)
 	} else {
 		w.posV0(pos)
 	}
 }

+// posV2 encoding (used only in shallow mode) records positions as
+// (file, offset), where file is the index in the token.File table
+// (which records the file name and newline offsets) and offset is a
+// byte offset. It effectively ignores //line directives.
+func (w *exportWriter) posV2(pos token.Pos) {
+	if pos == token.NoPos {
+		w.uint64(0)
+		return
+	}
+	file := w.p.fset.File(pos) // fset must be non-nil
+	index, offset := w.p.fileIndexAndOffset(file, pos)
+	w.uint64(1 + index)
+	w.uint64(offset)
+}
+
 func (w *exportWriter) posV1(pos token.Pos) {
 	if w.p.fset == nil {
 		w.int64(0)
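The posV2 wire format is deliberately tiny: a single 0 for token.NoPos, otherwise 1+fileIndex followed by the byte offset, so that file index 0 remains representable. A self-contained sketch of that encoder/decoder symmetry (hypothetical helper names, not the vendored API):

package main

import "fmt"

// encodePos mirrors posV2: NoPos becomes a single 0; any real position
// becomes (1+fileIndex, byteOffset).
func encodePos(noPos bool, fileIndex, offset uint64) []uint64 {
	if noPos {
		return []uint64{0}
	}
	return []uint64{1 + fileIndex, offset}
}

// decodePos mirrors posv2 on the import side.
func decodePos(words []uint64) (noPos bool, fileIndex, offset uint64) {
	if words[0] == 0 {
		return true, 0, 0
	}
	return false, words[0] - 1, words[1]
}

func main() {
	words := encodePos(false, 0, 42)
	fmt.Println(words)            // [1 42]
	fmt.Println(decodePos(words)) // false 0 42
}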
82 changes: vendor/golang.org/x/tools/internal/gcimporter/iimport.go (generated, vendored)
@@ -137,12 +137,23 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data
 	}

 	sLen := int64(r.uint64())
+	var fLen int64
+	var fileOffset []uint64
+	if insert != nil {
+		// Shallow mode uses a different position encoding.
+		fLen = int64(r.uint64())
+		fileOffset = make([]uint64, r.uint64())
+		for i := range fileOffset {
+			fileOffset[i] = r.uint64()
+		}
+	}
 	dLen := int64(r.uint64())

 	whence, _ := r.Seek(0, io.SeekCurrent)
 	stringData := data[whence : whence+sLen]
-	declData := data[whence+sLen : whence+sLen+dLen]
-	r.Seek(sLen+dLen, io.SeekCurrent)
+	fileData := data[whence+sLen : whence+sLen+fLen]
+	declData := data[whence+sLen+fLen : whence+sLen+fLen+dLen]
+	r.Seek(sLen+fLen+dLen, io.SeekCurrent)

 	p := iimporter{
 		version: int(version),

@@ -151,6 +162,9 @@ func iimportCommon(fset *token.FileSet, imports map[string]*types.Package, data

 		stringData:  stringData,
 		stringCache: make(map[uint64]string),
+		fileOffset:  fileOffset,
+		fileData:    fileData,
+		fileCache:   make([]*token.File, len(fileOffset)),
 		pkgCache:    make(map[uint64]*types.Package),

 		declData: declData,

@@ -280,6 +294,9 @@ type iimporter struct {

 	stringData  []byte
 	stringCache map[uint64]string
+	fileOffset  []uint64 // fileOffset[i] is offset in fileData for info about file encoded as i
+	fileData    []byte
+	fileCache   []*token.File // memoized decoding of file encoded as i
 	pkgCache    map[uint64]*types.Package

 	declData []byte
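After these hunks the payload is carved into three consecutive sections: strings, then (in shallow mode) file records, then declarations, with all lengths taken from the header. A toy sketch of that slicing arithmetic (the helper name and byte values are made up for illustration):

package main

import "fmt"

// sliceSections mirrors how iimportCommon carves the payload: the string
// section, then the file section (empty outside shallow mode), then the
// declaration data. whence is the offset where the header ended.
func sliceSections(data []byte, whence, sLen, fLen, dLen int64) (strs, files, decls []byte) {
	strs = data[whence : whence+sLen]
	files = data[whence+sLen : whence+sLen+fLen]
	decls = data[whence+sLen+fLen : whence+sLen+fLen+dLen]
	return
}

func main() {
	data := []byte("HHHHSSSFFDDDD") // 4-byte header, 3 strings, 2 files, 4 decls
	s, f, d := sliceSections(data, 4, 3, 2, 4)
	fmt.Printf("%s %s %s\n", s, f, d) // SSS FF DDDD
}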
@@ -352,6 +369,55 @@ func (p *iimporter) stringAt(off uint64) string {
 	return s
 }

+func (p *iimporter) fileAt(index uint64) *token.File {
+	file := p.fileCache[index]
+	if file == nil {
+		off := p.fileOffset[index]
+		file = p.decodeFile(intReader{bytes.NewReader(p.fileData[off:]), p.ipath})
+		p.fileCache[index] = file
+	}
+	return file
+}
+
+func (p *iimporter) decodeFile(rd intReader) *token.File {
+	filename := p.stringAt(rd.uint64())
+	size := int(rd.uint64())
+	file := p.fake.fset.AddFile(filename, -1, size)
+
+	// SetLines requires a nondecreasing sequence.
+	// Because it is common for clients to derive the interval
+	// [start, start+len(name)] from a start position, and we
+	// want to ensure that the end offset is on the same line,
+	// we fill in the gaps of the sparse encoding with values
+	// that strictly increase by the largest possible amount.
+	// This allows us to avoid having to record the actual end
+	// offset of each needed line.
+
+	lines := make([]int, int(rd.uint64()))
+	var index, offset int
+	for i, n := 0, int(rd.uint64()); i < n; i++ {
+		index += int(rd.uint64())
+		offset += int(rd.uint64())
+		lines[index] = offset
+
+		// Ensure monotonicity between points.
+		for j := index - 1; j > 0 && lines[j] == 0; j-- {
+			lines[j] = lines[j+1] - 1
+		}
+	}
+
+	// Ensure monotonicity after last point.
+	for j := len(lines) - 1; j > 0 && lines[j] == 0; j-- {
+		size--
+		lines[j] = size
+	}
+
+	if !file.SetLines(lines) {
+		errorf("SetLines failed: %d", lines) // can't happen
+	}
+	return file
+}
+
 func (p *iimporter) pkgAt(off uint64) *types.Package {
 	if pkg, ok := p.pkgCache[off]; ok {
 		return pkg
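decodeFile's gap filling is the subtle part: only some line starts were recorded, so the gaps must be filled with strictly increasing synthetic values before token.File.SetLines will accept the table. A standalone sketch of just that fill, with the same loop structure as above (illustrative, not the vendored code):

package main

import "fmt"

// fillLines places the known (index, offset) points, counts gaps before
// each point down by 1 from the next known value, and counts the tail
// down from the file size, yielding a strictly increasing line table.
func fillLines(n, size int, points [][2]int) []int {
	lines := make([]int, n)
	for _, pt := range points {
		index, offset := pt[0], pt[1]
		lines[index] = offset
		// Ensure monotonicity between points.
		for j := index - 1; j > 0 && lines[j] == 0; j-- {
			lines[j] = lines[j+1] - 1
		}
	}
	// Ensure monotonicity after the last point.
	for j := n - 1; j > 0 && lines[j] == 0; j-- {
		size--
		lines[j] = size
	}
	return lines
}

func main() {
	// 5 lines, 70-byte file; only lines 1 and 4 were recorded faithfully.
	fmt.Println(fillLines(5, 70, [][2]int{{1, 10}, {4, 60}})) // [0 10 58 59 60]
}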
@@ -645,6 +711,9 @@ func (r *importReader) qualifiedIdent() (*types.Package, string) {
 }

 func (r *importReader) pos() token.Pos {
+	if r.p.insert != nil { // shallow mode
+		return r.posv2()
+	}
 	if r.p.version >= iexportVersionPosCol {
 		r.posv1()
 	} else {

@@ -681,6 +750,15 @@ func (r *importReader) posv1() {
 	}
 }

+func (r *importReader) posv2() token.Pos {
+	file := r.uint64()
+	if file == 0 {
+		return token.NoPos
+	}
+	tf := r.p.fileAt(file - 1)
+	return tf.Pos(int(r.uint64()))
+}
+
 func (r *importReader) typ() types.Type {
 	return r.p.typAt(r.uint64(), nil)
 }
2 changes: vendor/golang.org/x/tools/internal/pkgbits/decoder.go (generated, vendored)
@@ -373,7 +373,7 @@ func (r *Decoder) Int64() int64 {
 	return r.rawVarint()
 }

-// Int64 decodes and returns a uint64 value from the element bitstream.
+// Uint64 decodes and returns a uint64 value from the element bitstream.
 func (r *Decoder) Uint64() uint64 {
 	r.Sync(SyncUint64)
 	return r.rawUvarint()
2 changes: vendor/golang.org/x/tools/internal/pkgbits/encoder.go (generated, vendored)
@@ -293,7 +293,7 @@ func (w *Encoder) Len(x int) { assert(x >= 0); w.Uint64(uint64(x)) }
 // Int encodes and writes an int value into the element bitstream.
 func (w *Encoder) Int(x int) { w.Int64(int64(x)) }

-// Len encodes and writes a uint value into the element bitstream.
+// Uint encodes and writes a uint value into the element bitstream.
 func (w *Encoder) Uint(x uint) { w.Uint64(uint64(x)) }

 // Reloc encodes and writes a relocation for the given (section,
59 changes: vendor/golang.org/x/tools/internal/tokeninternal/tokeninternal.go (generated, vendored, new file)
@@ -0,0 +1,59 @@
+// Copyright 2023 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// package tokeninternal provides access to some internal features of the token
+// package.
+package tokeninternal
+
+import (
+	"go/token"
+	"sync"
+	"unsafe"
+)
+
+// GetLines returns the table of line-start offsets from a token.File.
+func GetLines(file *token.File) []int {
+	// token.File has a Lines method on Go 1.21 and later.
+	if file, ok := (interface{})(file).(interface{ Lines() []int }); ok {
+		return file.Lines()
+	}
+
+	// This declaration must match that of token.File.
+	// This creates a risk of dependency skew.
+	// For now we check that the size of the two
+	// declarations is the same, on the (fragile) assumption
+	// that future changes would add fields.
+	type tokenFile119 struct {
+		_     string
+		_     int
+		_     int
+		mu    sync.Mutex // we're not complete monsters
+		lines []int
+		_     []struct{}
+	}
+	type tokenFile118 struct {
+		_ *token.FileSet // deleted in go1.19
+		tokenFile119
+	}
+
+	type uP = unsafe.Pointer
+	switch unsafe.Sizeof(*file) {
+	case unsafe.Sizeof(tokenFile118{}):
+		var ptr *tokenFile118
+		*(*uP)(uP(&ptr)) = uP(file)
+		ptr.mu.Lock()
+		defer ptr.mu.Unlock()
+		return ptr.lines
+
+	case unsafe.Sizeof(tokenFile119{}):
+		var ptr *tokenFile119
+		*(*uP)(uP(&ptr)) = uP(file)
+		ptr.mu.Lock()
+		defer ptr.mu.Unlock()
+		return ptr.lines
+
+	default:
+		panic("unexpected token.File size")
+	}
+}
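On Go 1.21 and later the unsafe fallback above is dead code, because token.File gained a public Lines method, which is the branch GetLines takes first. A minimal sketch of that supported path using only the standard library:

package main

import (
	"fmt"
	"go/token"
)

func main() {
	fset := token.NewFileSet()
	src := "package p\n\nfunc f() {}\n"
	file := fset.AddFile("p.go", -1, len(src))
	file.SetLinesForContent([]byte(src))

	// On Go 1.21+ token.File exposes the line table directly; this is
	// what GetLines returns when available, and the unsafe struct
	// mirroring exists only for older toolchains.
	fmt.Println(file.Lines()) // [0 10 11]
}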