Re-vendor latest kube-openapi and gengo/v2
./hack/pin-dependency.sh k8s.io/kube-openapi latest
./hack/pin-dependency.sh k8s.io/gengo/v2 latest
./hack/update-vendor.sh
vendor/golang.org/x/net/html/token.go | 12 (generated, vendored)

@@ -910,9 +910,6 @@ func (z *Tokenizer) readTagAttrKey() {
return
}
switch c {
case ' ', '\n', '\r', '\t', '\f', '/':
z.pendingAttr[0].end = z.raw.end - 1
return
case '=':
if z.pendingAttr[0].start+1 == z.raw.end {
// WHATWG 13.2.5.32, if we see an equals sign before the attribute name

@@ -920,7 +917,9 @@ func (z *Tokenizer) readTagAttrKey() {
continue
}
fallthrough
case '>':
case ' ', '\n', '\r', '\t', '\f', '/', '>':
// WHATWG 13.2.5.33 Attribute name state
// We need to reconsume the char in the after attribute name state to support the / character
z.raw.end--
z.pendingAttr[0].end = z.raw.end
return

@@ -939,6 +938,11 @@ func (z *Tokenizer) readTagAttrVal() {
if z.err != nil {
return
}
if c == '/' {
// WHATWG 13.2.5.34 After attribute name state
// U+002F SOLIDUS (/) - Switch to the self-closing start tag state.
return
}
if c != '=' {
z.raw.end--
return
vendor/golang.org/x/net/http2/frame.go | 11 (generated, vendored)

@@ -1510,13 +1510,12 @@ func (mh *MetaHeadersFrame) checkPseudos() error {
}

func (fr *Framer) maxHeaderStringLen() int {
v := fr.maxHeaderListSize()
if uint32(int(v)) == v {
return int(v)
v := int(fr.maxHeaderListSize())
if v < 0 {
// If maxHeaderListSize overflows an int, use no limit (0).
return 0
}
// They had a crazy big number for MaxHeaderBytes anyway,
// so give them unlimited header lengths:
return 0
return v
}

// readMetaFrame returns 0 or more CONTINUATION frames from fr and
vendor/golang.org/x/sync/errgroup/errgroup.go | 3 (generated, vendored)

@@ -4,6 +4,9 @@

// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
//
// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks
// returning errors.
package errgroup

import (
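As an aside (illustrative only, not part of this vendored diff), the new doc comment above describes errgroup.Group as a sync.WaitGroup that also collects errors; a minimal sketch of that usage, using only the public errgroup API, is:

    package main

    import (
    	"fmt"

    	"golang.org/x/sync/errgroup"
    )

    func main() {
    	// Like sync.WaitGroup, but the first non-nil error returned by any
    	// subtask is reported by Wait.
    	var g errgroup.Group
    	for _, task := range []string{"a", "b"} {
    		task := task // capture loop variable for the goroutine
    		g.Go(func() error {
    			fmt.Println("working on", task)
    			return nil
    		})
    	}
    	if err := g.Wait(); err != nil {
    		fmt.Println("error:", err)
    	}
    }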
vendor/golang.org/x/tools/go/packages/doc.go | 40 (generated, vendored)

@@ -15,22 +15,10 @@ Load passes most patterns directly to the underlying build tool.
The default build tool is the go command.
Its supported patterns are described at
https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns.
Other build systems may be supported by providing a "driver";
see [The driver protocol].

Load may be used in Go projects that use alternative build systems, by
installing an appropriate "driver" program for the build system and
specifying its location in the GOPACKAGESDRIVER environment variable.
For example,
https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
explains how to use the driver for Bazel.
The driver program is responsible for interpreting patterns in its
preferred notation and reporting information about the packages that
they identify.
(See driverRequest and driverResponse types for the JSON
schema used by the protocol.
Though the protocol is supported, these types are currently unexported;
see #64608 for a proposal to publish them.)

Regardless of driver, all patterns with the prefix "query=", where query is a
All patterns with the prefix "query=", where query is a
non-empty string of letters from [a-z], are reserved and may be
interpreted as query operators.

@@ -86,7 +74,29 @@ for details.
Most tools should pass their command-line arguments (after any flags)
uninterpreted to [Load], so that it can interpret them
according to the conventions of the underlying build system.

See the Example function for typical usage.

# The driver protocol

[Load] may be used to load Go packages even in Go projects that use
alternative build systems, by installing an appropriate "driver"
program for the build system and specifying its location in the
GOPACKAGESDRIVER environment variable.
For example,
https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
explains how to use the driver for Bazel.

The driver program is responsible for interpreting patterns in its
preferred notation and reporting information about the packages that
those patterns identify. Drivers must also support the special "file="
and "pattern=" patterns described above.

The patterns are provided as positional command-line arguments. A
JSON-encoded [DriverRequest] message providing additional information
is written to the driver's standard input. The driver must write a
JSON-encoded [DriverResponse] message to its standard output. (This
message differs from the JSON schema produced by 'go list'.)
*/
package packages // import "golang.org/x/tools/go/packages"
vendor/golang.org/x/tools/go/packages/external.go | 77 (generated, vendored)

@@ -2,12 +2,11 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file enables an external tool to intercept package requests.
// If the tool is present then its results are used in preference to
// the go list command.

package packages

// This file defines the protocol that enables an external "driver"
// tool to supply package metadata in place of 'go list'.

import (
"bytes"
"encoding/json"

@@ -17,31 +16,71 @@ import (
"strings"
)

// The Driver Protocol
// DriverRequest defines the schema of a request for package metadata
// from an external driver program. The JSON-encoded DriverRequest
// message is provided to the driver program's standard input. The
// query patterns are provided as command-line arguments.
//
// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
// This allows for different build systems to support go/packages by telling go/packages how the
// packages' source is organized.
// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
// documentation in doc.go for the full description of the patterns that need to be supported.
// A driver receives as a JSON-serialized driverRequest struct in standard input and will
// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.

// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
type driverRequest struct {
// See the package documentation for an overview.
type DriverRequest struct {
Mode LoadMode `json:"mode"`

// Env specifies the environment the underlying build system should be run in.
Env []string `json:"env"`

// BuildFlags are flags that should be passed to the underlying build system.
BuildFlags []string `json:"build_flags"`

// Tests specifies whether the patterns should also return test packages.
Tests bool `json:"tests"`

// Overlay maps file paths (relative to the driver's working directory) to the byte contents
// of overlay files.
Overlay map[string][]byte `json:"overlay"`
}

// DriverResponse defines the schema of a response from an external
// driver program, providing the results of a query for package
// metadata. The driver program must write a JSON-encoded
// DriverResponse message to its standard output.
//
// See the package documentation for an overview.
type DriverResponse struct {
// NotHandled is returned if the request can't be handled by the current
// driver. If an external driver returns a response with NotHandled, the
// rest of the DriverResponse is ignored, and go/packages will fallback
// to the next driver. If go/packages is extended in the future to support
// lists of multiple drivers, go/packages will fall back to the next driver.
NotHandled bool

// Compiler and Arch are the arguments pass of types.SizesFor
// to get a types.Sizes to use when type checking.
Compiler string
Arch string

// Roots is the set of package IDs that make up the root packages.
// We have to encode this separately because when we encode a single package
// we cannot know if it is one of the roots as that requires knowledge of the
// graph it is part of.
Roots []string `json:",omitempty"`

// Packages is the full set of packages in the graph.
// The packages are not connected into a graph.
// The Imports if populated will be stubs that only have their ID set.
// Imports will be connected and then type and syntax information added in a
// later pass (see refine).
Packages []*Package

// GoVersion is the minor version number used by the driver
// (e.g. the go command on the PATH) when selecting .go files.
// Zero means unknown.
GoVersion int
}

// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*DriverResponse, error)

// findExternalDriver returns the file path of a tool that supplies
// the build system package structure, or "" if not found."
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its

@@ -64,8 +103,8 @@ func findExternalDriver(cfg *Config) driver {
return nil
}
}
return func(cfg *Config, words ...string) (*driverResponse, error) {
req, err := json.Marshal(driverRequest{
return func(cfg *Config, words ...string) (*DriverResponse, error) {
req, err := json.Marshal(DriverRequest{
Mode: cfg.Mode,
Env: cfg.Env,
BuildFlags: cfg.BuildFlags,

@@ -92,7 +131,7 @@ func findExternalDriver(cfg *Config) driver {
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
}

var response driverResponse
var response DriverResponse
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
return nil, err
}
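The doc.go and external.go hunks above describe the now-exported driver protocol: patterns arrive as positional arguments, a JSON DriverRequest arrives on standard input, and a JSON DriverResponse must be written to standard output. Purely as an illustration (not part of this commit), a minimal external driver that declines every query so go/packages falls back to `go list` could look like the sketch below; it assumes a module depending on golang.org/x/tools at a version where these types are exported, and the file name is hypothetical.

    // minimal_gopackagesdriver.go: illustrative sketch only.
    package main

    import (
    	"encoding/json"
    	"os"

    	"golang.org/x/tools/go/packages"
    )

    func main() {
    	// Patterns are passed as positional arguments; the request body is on stdin.
    	var req packages.DriverRequest
    	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
    		os.Exit(1)
    	}
    	_ = os.Args[1:] // a real driver would resolve these patterns via its build system

    	// Decline the query: go/packages then falls back to the go list driver.
    	resp := packages.DriverResponse{NotHandled: true}
    	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
    		os.Exit(1)
    	}
    }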
vendor/golang.org/x/tools/go/packages/golist.go | 35 (generated, vendored)

@@ -35,23 +35,23 @@ type goTooOldError struct {
error
}

// responseDeduper wraps a driverResponse, deduplicating its contents.
// responseDeduper wraps a DriverResponse, deduplicating its contents.
type responseDeduper struct {
seenRoots map[string]bool
seenPackages map[string]*Package
dr *driverResponse
dr *DriverResponse
}

func newDeduper() *responseDeduper {
return &responseDeduper{
dr: &driverResponse{},
dr: &DriverResponse{},
seenRoots: map[string]bool{},
seenPackages: map[string]*Package{},
}
}

// addAll fills in r with a driverResponse.
func (r *responseDeduper) addAll(dr *driverResponse) {
// addAll fills in r with a DriverResponse.
func (r *responseDeduper) addAll(dr *DriverResponse) {
for _, pkg := range dr.Packages {
r.addPackage(pkg)
}

@@ -128,7 +128,7 @@ func (state *golistState) mustGetEnv() map[string]string {
// goListDriver uses the go list command to interpret the patterns and produce
// the build system package structure.
// See driver for more details.
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
func goListDriver(cfg *Config, patterns ...string) (_ *DriverResponse, err error) {
// Make sure that any asynchronous go commands are killed when we return.
parentCtx := cfg.Context
if parentCtx == nil {

@@ -146,16 +146,18 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
}

// Fill in response.Sizes asynchronously if necessary.
var sizeserr error
var sizeswg sync.WaitGroup
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
sizeswg.Add(1)
errCh := make(chan error)
go func() {
compiler, arch, err := packagesdriver.GetSizesForArgsGolist(ctx, state.cfgInvocation(), cfg.gocmdRunner)
sizeserr = err
response.dr.Compiler = compiler
response.dr.Arch = arch
sizeswg.Done()
errCh <- err
}()
defer func() {
if sizesErr := <-errCh; sizesErr != nil {
err = sizesErr
}
}()
}

@@ -208,10 +210,7 @@ extractQueries:
}
}

sizeswg.Wait()
if sizeserr != nil {
return nil, sizeserr
}
// (We may yet return an error due to defer.)
return response.dr, nil
}

@@ -266,7 +265,7 @@ func (state *golistState) runContainsQueries(response *responseDeduper, queries

// adhocPackage attempts to load or construct an ad-hoc package for a given
// query, if the original call to the driver produced inadequate results.
func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
func (state *golistState) adhocPackage(pattern, query string) (*DriverResponse, error) {
response, err := state.createDriverResponse(query)
if err != nil {
return nil, err

@@ -357,7 +356,7 @@ func otherFiles(p *jsonPackage) [][]string {

// createDriverResponse uses the "go list" command to expand the pattern
// words and return a response for the specified packages.
func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
func (state *golistState) createDriverResponse(words ...string) (*DriverResponse, error) {
// go list uses the following identifiers in ImportPath and Imports:
//
// "p" -- importable package or main (command)

@@ -384,7 +383,7 @@ func (state *golistState) createDriverResponse(words ...string) (*driverResponse
pkgs := make(map[string]*Package)
additionalErrors := make(map[string][]Error)
// Decode the JSON and convert it to Package form.
response := &driverResponse{
response := &DriverResponse{
GoVersion: goVersion,
}
for dec := json.NewDecoder(buf); dec.More(); {
vendor/golang.org/x/tools/go/packages/packages.go | 46 (generated, vendored)

@@ -206,43 +206,6 @@ type Config struct {
Overlay map[string][]byte
}

// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns ...string) (*driverResponse, error)

// driverResponse contains the results for a driver query.
type driverResponse struct {
// NotHandled is returned if the request can't be handled by the current
// driver. If an external driver returns a response with NotHandled, the
// rest of the driverResponse is ignored, and go/packages will fallback
// to the next driver. If go/packages is extended in the future to support
// lists of multiple drivers, go/packages will fall back to the next driver.
NotHandled bool

// Compiler and Arch are the arguments pass of types.SizesFor
// to get a types.Sizes to use when type checking.
Compiler string
Arch string

// Roots is the set of package IDs that make up the root packages.
// We have to encode this separately because when we encode a single package
// we cannot know if it is one of the roots as that requires knowledge of the
// graph it is part of.
Roots []string `json:",omitempty"`

// Packages is the full set of packages in the graph.
// The packages are not connected into a graph.
// The Imports if populated will be stubs that only have their ID set.
// Imports will be connected and then type and syntax information added in a
// later pass (see refine).
Packages []*Package

// GoVersion is the minor version number used by the driver
// (e.g. the go command on the PATH) when selecting .go files.
// Zero means unknown.
GoVersion int
}

// Load loads and returns the Go packages named by the given patterns.
//
// Config specifies loading options;

@@ -291,7 +254,7 @@ func Load(cfg *Config, patterns ...string) ([]*Package, error) {
// no external driver, or the driver returns a response with NotHandled set,
// defaultDriver will fall back to the go list driver.
// The boolean result indicates that an external driver handled the request.
func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, error) {
func defaultDriver(cfg *Config, patterns ...string) (*DriverResponse, bool, error) {
if driver := findExternalDriver(cfg); driver != nil {
response, err := driver(cfg, patterns...)
if err != nil {

@@ -303,7 +266,10 @@ func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, bool, erro
}

response, err := goListDriver(cfg, patterns...)
return response, false, err
if err != nil {
return nil, false, err
}
return response, false, nil
}

// A Package describes a loaded Go package.

@@ -648,7 +614,7 @@ func newLoader(cfg *Config) *loader {

// refine connects the supplied packages into a graph and then adds type
// and syntax information as requested by the LoadMode.
func (ld *loader) refine(response *driverResponse) ([]*Package, error) {
func (ld *loader) refine(response *DriverResponse) ([]*Package, error) {
roots := response.Roots
rootMap := make(map[string]int, len(roots))
for i, root := range roots {
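For context only (not part of the diff), the public entry point touched here is packages.Load; renaming driverResponse to DriverResponse does not change calling code. A minimal sketch of standard usage, using only well-known go/packages API:

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/tools/go/packages"
    )

    func main() {
    	// Load resolves the patterns via an external GOPACKAGESDRIVER if one is
    	// configured, otherwise via the go list driver shown in golist.go above.
    	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedImports}
    	pkgs, err := packages.Load(cfg, "./...")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, p := range pkgs {
    		fmt.Println(p.ID, p.Name)
    	}
    }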
vendor/golang.org/x/tools/internal/gcimporter/iimport.go | 7 (generated, vendored)

@@ -224,6 +224,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte

// Gather the relevant packages from the manifest.
items := make([]GetPackagesItem, r.uint64())
uniquePkgPaths := make(map[string]bool)
for i := range items {
pkgPathOff := r.uint64()
pkgPath := p.stringAt(pkgPathOff)

@@ -248,6 +249,12 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte
}

items[i].nameIndex = nameIndex

uniquePkgPaths[pkgPath] = true
}
// Debugging #63822; hypothesis: there are duplicate PkgPaths.
if len(uniquePkgPaths) != len(items) {
reportf("found duplicate PkgPaths while reading export data manifest: %v", items)
}

// Request packages all at once from the client,
vendor/golang.org/x/tools/internal/gopathwalk/walk.go | 320 (generated, vendored)

@@ -9,11 +9,13 @@ package gopathwalk
import (
"bufio"
"bytes"
"io"
"io/fs"
"log"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"time"
)

@@ -21,8 +23,13 @@ import (
type Options struct {
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})

// Search module caches. Also disables legacy goimports ignore rules.
ModulesEnabled bool

// Maximum number of concurrent calls to user-provided callbacks,
// or 0 for GOMAXPROCS.
Concurrency int
}

// RootType indicates the type of a Root.

@@ -43,19 +50,28 @@ type Root struct {
Type RootType
}

// Walk walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// Walk concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
//
// For each package found, add will be called with the absolute
// paths of the containing source directory and the package directory.
//
// Unlike filepath.WalkDir, Walk follows symbolic links
// (while guarding against cycles).
func Walk(roots []Root, add func(root Root, dir string), opts Options) {
WalkSkip(roots, add, func(Root, string) bool { return false }, opts)
}

// WalkSkip walks Go source directories ($GOROOT, $GOPATH, etc) to find packages.
// WalkSkip concurrently walks Go source directories ($GOROOT, $GOPATH, etc) to
// find packages.
//
// For each package found, add will be called with the absolute
// paths of the containing source directory and the package directory.
// For each directory that will be scanned, skip will be called
// with the absolute paths of the containing source directory and the directory.
// If skip returns false on a directory it will be processed.
//
// Unlike filepath.WalkDir, WalkSkip follows symbolic links
// (while guarding against cycles).
func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root, dir string) bool, opts Options) {
for _, root := range roots {
walkDir(root, add, skip, opts)

@@ -64,45 +80,51 @@ func WalkSkip(roots []Root, add func(root Root, dir string), skip func(root Root

// walkDir creates a walker and starts fastwalk with this walker.
func walkDir(root Root, add func(Root, string), skip func(root Root, dir string) bool, opts Options) {
if opts.Logf == nil {
opts.Logf = func(format string, args ...interface{}) {}
}
if _, err := os.Stat(root.Path); os.IsNotExist(err) {
if opts.Logf != nil {
opts.Logf("skipping nonexistent directory: %v", root.Path)
}
opts.Logf("skipping nonexistent directory: %v", root.Path)
return
}
start := time.Now()
if opts.Logf != nil {
opts.Logf("scanning %s", root.Path)
}
opts.Logf("scanning %s", root.Path)

concurrency := opts.Concurrency
if concurrency == 0 {
// The walk be either CPU-bound or I/O-bound, depending on what the
// caller-supplied add function does and the details of the user's platform
// and machine. Rather than trying to fine-tune the concurrency level for a
// specific environment, we default to GOMAXPROCS: it is likely to be a good
// choice for a CPU-bound add function, and if it is instead I/O-bound, then
// dealing with I/O saturation is arguably the job of the kernel and/or
// runtime. (Oversaturating I/O seems unlikely to harm performance as badly
// as failing to saturate would.)
concurrency = runtime.GOMAXPROCS(0)
}
w := &walker{
root: root,
add: add,
skip: skip,
opts: opts,
added: make(map[string]bool),
root: root,
add: add,
skip: skip,
opts: opts,
sem: make(chan struct{}, concurrency),
}
w.init()

// Add a trailing path separator to cause filepath.WalkDir to traverse symlinks.
w.sem <- struct{}{}
path := root.Path
if len(path) == 0 {
path = "." + string(filepath.Separator)
} else if !os.IsPathSeparator(path[len(path)-1]) {
path = path + string(filepath.Separator)
if path == "" {
path = "."
}
if fi, err := os.Lstat(path); err == nil {
w.walk(path, nil, fs.FileInfoToDirEntry(fi))
} else {
w.opts.Logf("scanning directory %v: %v", root.Path, err)
}
<-w.sem
w.walking.Wait()

if err := filepath.WalkDir(path, w.walk); err != nil {
logf := opts.Logf
if logf == nil {
logf = log.Printf
}
logf("scanning directory %v: %v", root.Path, err)
}

if opts.Logf != nil {
opts.Logf("scanned %s in %v", root.Path, time.Since(start))
}
opts.Logf("scanned %s in %v", root.Path, time.Since(start))
}

// walker is the callback for fastwalk.Walk.

@@ -112,10 +134,18 @@ type walker struct {
skip func(Root, string) bool // The callback that will be invoked for every dir. dir is skipped if it returns true.
opts Options // Options passed to Walk by the user.

pathSymlinks []os.FileInfo
ignoredDirs []string
walking sync.WaitGroup
sem chan struct{} // Channel of semaphore tokens; send to acquire, receive to release.
ignoredDirs []string

added map[string]bool
added sync.Map // map[string]bool
}

// A symlinkList is a linked list of os.FileInfos for parent directories
// reached via symlinks.
type symlinkList struct {
info os.FileInfo
prev *symlinkList
}

// init initializes the walker based on its Options

@@ -132,9 +162,7 @@ func (w *walker) init() {
for _, p := range ignoredPaths {
full := filepath.Join(w.root.Path, p)
w.ignoredDirs = append(w.ignoredDirs, full)
if w.opts.Logf != nil {
w.opts.Logf("Directory added to ignore list: %s", full)
}
w.opts.Logf("Directory added to ignore list: %s", full)
}
}

@@ -144,12 +172,10 @@ func (w *walker) init() {
func (w *walker) getIgnoredDirs(path string) []string {
file := filepath.Join(path, ".goimportsignore")
slurp, err := os.ReadFile(file)
if w.opts.Logf != nil {
if err != nil {
w.opts.Logf("%v", err)
} else {
w.opts.Logf("Read %s", file)
}
if err != nil {
w.opts.Logf("%v", err)
} else {
w.opts.Logf("Read %s", file)
}
if err != nil {
return nil

@@ -183,63 +209,22 @@ func (w *walker) shouldSkipDir(dir string) bool {

// walk walks through the given path.
//
// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored:
// walk returns only nil or fs.SkipDir.
func (w *walker) walk(path string, d fs.DirEntry, err error) error {
if err != nil {
// We have no way to report errors back through Walk or WalkSkip,
// so just log and ignore them.
if w.opts.Logf != nil {
w.opts.Logf("%v", err)
}
if d == nil {
// Nothing more to do: the error prevents us from knowing
// what path even represents.
return nil
}
}

if d.Type().IsRegular() {
if !strings.HasSuffix(path, ".go") {
return nil
}

dir := filepath.Dir(path)
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
return nil
}

if !w.added[dir] {
w.add(w.root, dir)
w.added[dir] = true
}
return nil
}

if d.IsDir() {
base := filepath.Base(path)
if base == "" || base[0] == '.' || base[0] == '_' ||
base == "testdata" ||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
(!w.opts.ModulesEnabled && base == "node_modules") {
return fs.SkipDir
}
if w.shouldSkipDir(path) {
return fs.SkipDir
}
return nil
}

// Errors are logged if w.opts.Logf is non-nil, but otherwise ignored.
func (w *walker) walk(path string, pathSymlinks *symlinkList, d fs.DirEntry) {
if d.Type()&os.ModeSymlink != 0 {
// Walk the symlink's target rather than the symlink itself.
//
// (Note that os.Stat, unlike the lower-lever os.Readlink,
// follows arbitrarily many layers of symlinks, so it will eventually
// reach either a non-symlink or a nonexistent target.)
//
// TODO(bcmills): 'go list all' itself ignores symlinks within GOROOT/src
// and GOPATH/src. Do we really need to traverse them here? If so, why?

fi, err := os.Stat(path)
if err != nil || !fi.IsDir() {
// Not a directory. Just walk the file (or broken link) and be done.
return w.walk(path, fs.FileInfoToDirEntry(fi), err)
if err != nil {
w.opts.Logf("%v", err)
return
}

// Avoid walking symlink cycles: if we have already followed a symlink to

@@ -249,83 +234,104 @@ func (w *walker) walk(path string, d fs.DirEntry, err error) error {
// the number of extra stat calls we make if we *don't* encounter a cycle.
// Since we don't actually expect to encounter symlink cycles in practice,
// this seems like the right tradeoff.
for _, parent := range w.pathSymlinks {
if os.SameFile(fi, parent) {
return nil
for parent := pathSymlinks; parent != nil; parent = parent.prev {
if os.SameFile(fi, parent.info) {
return
}
}

w.pathSymlinks = append(w.pathSymlinks, fi)
defer func() {
w.pathSymlinks = w.pathSymlinks[:len(w.pathSymlinks)-1]
}()
pathSymlinks = &symlinkList{
info: fi,
prev: pathSymlinks,
}
d = fs.FileInfoToDirEntry(fi)
}

// On some platforms the OS (or the Go os package) sometimes fails to
// resolve directory symlinks before a trailing slash
// (even though POSIX requires it to do so).
//
// On macOS that failure may be caused by a known libc/kernel bug;
// see https://go.dev/issue/59586.
//
// On Windows before Go 1.21, it may be caused by a bug in
// os.Lstat (fixed in https://go.dev/cl/463177).
//
// Since we need to handle this explicitly on broken platforms anyway,
// it is simplest to just always do that and not rely on POSIX pathname
// resolution to walk the directory (such as by calling WalkDir with
// a trailing slash appended to the path).
//
// Instead, we make a sequence of walk calls — directly and through
// recursive calls to filepath.WalkDir — simulating what WalkDir would do
// if the symlink were a regular directory.

// First we call walk on the path as a directory
// (instead of a symlink).
err = w.walk(path, fs.FileInfoToDirEntry(fi), nil)
if err == fs.SkipDir {
return nil
} else if err != nil {
// This should be impossible, but handle it anyway in case
// walk is changed to return other errors.
return err
if d.Type().IsRegular() {
if !strings.HasSuffix(path, ".go") {
return
}

// Now read the directory and walk its entries.
ents, err := os.ReadDir(path)
dir := filepath.Dir(path)
if dir == w.root.Path && (w.root.Type == RootGOROOT || w.root.Type == RootGOPATH) {
// Doesn't make sense to have regular files
// directly in your $GOPATH/src or $GOROOT/src.
//
// TODO(bcmills): there are many levels of directory within
// RootModuleCache where this also wouldn't make sense,
// Can we generalize this to any directory without a corresponding
// import path?
return
}

if _, dup := w.added.LoadOrStore(dir, true); !dup {
w.add(w.root, dir)
}
}

if !d.IsDir() {
return
}

base := filepath.Base(path)
if base == "" || base[0] == '.' || base[0] == '_' ||
base == "testdata" ||
(w.root.Type == RootGOROOT && w.opts.ModulesEnabled && base == "vendor") ||
(!w.opts.ModulesEnabled && base == "node_modules") ||
w.shouldSkipDir(path) {
return
}

// Read the directory and walk its entries.

f, err := os.Open(path)
if err != nil {
w.opts.Logf("%v", err)
return
}
defer f.Close()

for {
// We impose an arbitrary limit on the number of ReadDir results per
// directory to limit the amount of memory consumed for stale or upcoming
// directory entries. The limit trades off CPU (number of syscalls to read
// the whole directory) against RAM (reachable directory entries other than
// the one currently being processed).
//
// Since we process the directories recursively, we will end up maintaining
// a slice of entries for each level of the directory tree.
// (Compare https://go.dev/issue/36197.)
ents, err := f.ReadDir(1024)
if err != nil {
// Report the ReadDir error, as filepath.WalkDir would do.
err = w.walk(path, fs.FileInfoToDirEntry(fi), err)
if err == fs.SkipDir {
return nil
} else if err != nil {
return err // Again, should be impossible.
if err != io.EOF {
w.opts.Logf("%v", err)
}
// Fall through and iterate over whatever entries we did manage to get.
break
}

for _, d := range ents {
nextPath := filepath.Join(path, d.Name())
if d.IsDir() {
// We want to walk the whole directory tree rooted at nextPath,
// not just the single entry for the directory.
err := filepath.WalkDir(nextPath, w.walk)
if err != nil && w.opts.Logf != nil {
w.opts.Logf("%v", err)
}
} else {
err := w.walk(nextPath, d, nil)
if err == fs.SkipDir {
// Skip the rest of the entries in the parent directory of nextPath
// (that is, path itself).
break
} else if err != nil {
return err // Again, should be impossible.
select {
case w.sem <- struct{}{}:
// Got a new semaphore token, so we can traverse the directory concurrently.
d := d
w.walking.Add(1)
go func() {
defer func() {
<-w.sem
w.walking.Done()
}()
w.walk(nextPath, pathSymlinks, d)
}()
continue

default:
// No tokens available, so traverse serially.
}
}
}
return nil
}

// Not a file, regular directory, or symlink; skip.
return nil
w.walk(nextPath, pathSymlinks, d)
}
}
}
vendor/golang.org/x/tools/internal/imports/fix.go | 134 (generated, vendored)

@@ -13,6 +13,7 @@ import (
"go/build"
"go/parser"
"go/token"
"go/types"
"io/fs"
"io/ioutil"
"os"

@@ -700,20 +701,21 @@ func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map
return result, nil
}

func PrimeCache(ctx context.Context, env *ProcessEnv) error {
func PrimeCache(ctx context.Context, resolver Resolver) error {
// Fully scan the disk for directories, but don't actually read any Go files.
callback := &scanCallback{
rootFound: func(gopathwalk.Root) bool {
return true
rootFound: func(root gopathwalk.Root) bool {
// See getCandidatePkgs: walking GOROOT is apparently expensive and
// unnecessary.
return root.Type != gopathwalk.RootGOROOT
},
dirFound: func(pkg *pkg) bool {
return false
},
packageNameLoaded: func(pkg *pkg) bool {
return false
},
// packageNameLoaded and exportsLoaded must never be called.
}
return getCandidatePkgs(ctx, callback, "", "", env)

return resolver.scan(ctx, callback)
}

func candidateImportName(pkg *pkg) string {

@@ -827,16 +829,45 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
}

var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"}
// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate
// imports when doing cross-platform development.
var requiredGoEnvVars = []string{
"GO111MODULE",
"GOFLAGS",
"GOINSECURE",
"GOMOD",
"GOMODCACHE",
"GONOPROXY",
"GONOSUMDB",
"GOPATH",
"GOPROXY",
"GOROOT",
"GOSUMDB",
"GOWORK",
}

// ProcessEnv contains environment variables and settings that affect the use of
// the go command, the go/build package, etc.
//
// ...a ProcessEnv *also* overwrites its Env along with derived state in the
// form of the resolver. And because it is lazily initialized, an env may just
// be broken and unusable, but there is no way for the caller to detect that:
// all queries will just fail.
//
// TODO(rfindley): refactor this package so that this type (perhaps renamed to
// just Env or Config) is an immutable configuration struct, to be exchanged
// for an initialized object via a constructor that returns an error. Perhaps
// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where
// resolver is a concrete type used for resolving imports. Via this
// refactoring, we can avoid the need to call ProcessEnv.init and
// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where this
// these are misused. Also, we'd delegate the caller the decision of how to
// handle a broken environment.
type ProcessEnv struct {
GocmdRunner *gocommand.Runner

BuildFlags []string
ModFlag string
ModFile string

// SkipPathInScan returns true if the path should be skipped from scans of
// the RootCurrentModule root type. The function argument is a clean,

@@ -846,7 +877,7 @@ type ProcessEnv struct {
// Env overrides the OS environment, and can be used to specify
// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
// exec.Command will not honor it.
// Specifying all of RequiredGoEnvVars avoids a call to `go env`.
// Specifying all of requiredGoEnvVars avoids a call to `go env`.
Env map[string]string

WorkingDir string

@@ -854,9 +885,17 @@ type ProcessEnv struct {
// If Logf is non-nil, debug logging is enabled through this function.
Logf func(format string, args ...interface{})

initialized bool
// If set, ModCache holds a shared cache of directory info to use across
// multiple ProcessEnvs.
ModCache *DirInfoCache

resolver Resolver
initialized bool // see TODO above

// resolver and resolverErr are lazily evaluated (see GetResolver).
// This is unclean, but see the big TODO in the docstring for ProcessEnv
// above: for now, we can't be sure that the ProcessEnv is fully initialized.
resolver Resolver
resolverErr error
}

func (e *ProcessEnv) goEnv() (map[string]string, error) {

@@ -936,20 +975,31 @@ func (e *ProcessEnv) env() []string {
}

func (e *ProcessEnv) GetResolver() (Resolver, error) {
if e.resolver != nil {
return e.resolver, nil
}
if err := e.init(); err != nil {
return nil, err
}
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
e.resolver = newGopathResolver(e)
return e.resolver, nil

if e.resolver == nil && e.resolverErr == nil {
// TODO(rfindley): we should only use a gopathResolver here if the working
// directory is actually *in* GOPATH. (I seem to recall an open gopls issue
// for this behavior, but I can't find it).
//
// For gopls, we can optionally explicitly choose a resolver type, since we
// already know the view type.
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
e.resolver = newGopathResolver(e)
} else {
e.resolver, e.resolverErr = newModuleResolver(e, e.ModCache)
}
}
e.resolver = newModuleResolver(e)
return e.resolver, nil

return e.resolver, e.resolverErr
}

// buildContext returns the build.Context to use for matching files.
//
// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
// development.
func (e *ProcessEnv) buildContext() (*build.Context, error) {
ctx := build.Default
goenv, err := e.goEnv()

@@ -1029,15 +1079,23 @@ func addStdlibCandidates(pass *pass, refs references) error {
type Resolver interface {
// loadPackageNames loads the package names in importPaths.
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)

// scan works with callback to search for packages. See scanCallback for details.
scan(ctx context.Context, callback *scanCallback) error

// loadExports returns the set of exported symbols in the package at dir.
// loadExports may be called concurrently.
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error)

// scoreImportPath returns the relevance for an import path.
scoreImportPath(ctx context.Context, path string) float64

ClearForNewScan()
// ClearForNewScan returns a new Resolver based on the receiver that has
// cleared its internal caches of directory contents.
//
// The new resolver should be primed and then set via
// [ProcessEnv.UpdateResolver].
ClearForNewScan() Resolver
}

// A scanCallback controls a call to scan and receives its results.

@@ -1120,7 +1178,7 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
go func(pkgName string, symbols map[string]bool) {
defer wg.Done()

found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename)
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)

if err != nil {
firstErrOnce.Do(func() {

@@ -1151,6 +1209,17 @@ func addExternalCandidates(ctx context.Context, pass *pass, refs references, fil
}()

for result := range results {
// Don't offer completions that would shadow predeclared
// names, such as github.com/coreos/etcd/error.
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
// Ideally we would skip this candidate only
// if the predeclared name is actually
// referenced by the file, but that's a lot
// trickier to compute and would still create
// an import that is likely to surprise the
// user before long.
continue
}
pass.addCandidate(result.imp, result.pkg)
}
return firstErr

@@ -1193,31 +1262,22 @@ func ImportPathToAssumedName(importPath string) string {
type gopathResolver struct {
env *ProcessEnv
walked bool
cache *dirInfoCache
cache *DirInfoCache
scanSema chan struct{} // scanSema prevents concurrent scans.
}

func newGopathResolver(env *ProcessEnv) *gopathResolver {
r := &gopathResolver{
env: env,
cache: &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
},
env: env,
cache: NewDirInfoCache(),
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{}
return r
}

func (r *gopathResolver) ClearForNewScan() {
<-r.scanSema
r.cache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
r.walked = false
r.scanSema <- struct{}{}
func (r *gopathResolver) ClearForNewScan() Resolver {
return newGopathResolver(r.env)
}

func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {

@@ -1538,7 +1598,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl

// findImport searches for a package with the given symbols.
// If no package is found, findImport returns ("", false, nil)
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
// Sort the candidates by their import package length,
// assuming that shorter package names are better than long
// ones. Note that this sorts by the de-vendored name, so
vendor/golang.org/x/tools/internal/imports/imports.go | 2 (generated, vendored)

@@ -236,7 +236,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
src = src[:len(src)-len("}\n")]
// Gofmt has also indented the function body one level.
// Remove that indent.
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n"))
return matchSpace(orig, src)
}
return file, adjust, nil
vendor/golang.org/x/tools/internal/imports/mod.go | 283 (generated, vendored)

@@ -23,49 +23,88 @@ import (
"golang.org/x/tools/internal/gopathwalk"
)

// ModuleResolver implements resolver for modules using the go command as little
// as feasible.
// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning
// as fast as possible, which is desirable for a call to goimports from the
// command line, but it doesn't work as well for gopls, where it suffers from
// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216),
// both caused by populating the cache, albeit in slightly different ways.
//
// A high level list of TODOs:
//   - Optimize the scan itself, as there is some redundancy statting and
//     reading go.mod files.
//   - Invert the relationship between ProcessEnv and Resolver (see the
//     docstring of ProcessEnv).
//   - Make it easier to use an external resolver implementation.
//
// Smaller TODOs are annotated in the code below.

// ModuleResolver implements the Resolver interface for a workspace using
// modules.
//
// A goal of the ModuleResolver is to invoke the Go command as little as
// possible. To this end, it runs the Go command only for listing module
// information (i.e. `go list -m -e -json ...`). Package scanning, the process
// of loading package information for the modules, is implemented internally
// via the scan method.
//
// It has two types of state: the state derived from the go command, which
// is populated by init, and the state derived from scans, which is populated
// via scan. A root is considered scanned if it has been walked to discover
// directories. However, if the scan did not require additional information
// from the directory (such as package name or exports), the directory
// information itself may be partially populated. It will be lazily filled in
// as needed by scans, using the scanCallback.
type ModuleResolver struct {
env *ProcessEnv
moduleCacheDir string
dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
roots []gopathwalk.Root
scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
scannedRoots map[gopathwalk.Root]bool
env *ProcessEnv

initialized bool
mains []*gocommand.ModuleJSON
mainByDir map[string]*gocommand.ModuleJSON
modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path...
modsByDir []*gocommand.ModuleJSON // ...or number of path components in their Dir.
// Module state, populated during construction
dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory
moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset
roots []gopathwalk.Root // roots to scan, in approximate order of importance
mains []*gocommand.ModuleJSON // main modules
mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots
modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path
modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir.

// moduleCacheCache stores information about the module cache.
moduleCacheCache *dirInfoCache
otherCache *dirInfoCache
// Scanning state, populated by scan

// scanSema prevents concurrent scans, and guards scannedRoots and the cache
// fields below (though the caches themselves are concurrency safe).
// Receive to acquire, send to release.
scanSema chan struct{}
scannedRoots map[gopathwalk.Root]bool // if true, root has been walked

// Caches of directory info, populated by scans and scan callbacks
//
// moduleCacheCache stores cached information about roots in the module
// cache, which are immutable and therefore do not need to be invalidated.
//
// otherCache stores information about all other roots (even GOROOT), which
// may change.
moduleCacheCache *DirInfoCache
otherCache *DirInfoCache
}

func newModuleResolver(e *ProcessEnv) *ModuleResolver {
// newModuleResolver returns a new module-aware goimports resolver.
//
// Note: use caution when modifying this constructor: changes must also be
// reflected in ModuleResolver.ClearForNewScan.
func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) {
r := &ModuleResolver{
env: e,
scanSema: make(chan struct{}, 1),
}
r.scanSema <- struct{}{}
return r
}

func (r *ModuleResolver) init() error {
if r.initialized {
return nil
}
r.scanSema <- struct{}{} // release

goenv, err := r.env.goEnv()
if err != nil {
return err
return nil, err
}

// TODO(rfindley): can we refactor to share logic with r.env.invokeGo?
inv := gocommand.Invocation{
BuildFlags: r.env.BuildFlags,
ModFlag: r.env.ModFlag,
ModFile: r.env.ModFile,
Env: r.env.env(),
Logf: r.env.Logf,
WorkingDir: r.env.WorkingDir,

@@ -77,9 +116,12 @@ func (r *ModuleResolver) init() error {
// Module vendor directories are ignored in workspace mode:
// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md
if len(r.env.Env["GOWORK"]) == 0 {
// TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but
// they should be available from the ProcessEnv. Can we avoid the redundant
// invocation?
vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
if err != nil {
return err
return nil, err
}
}

@@ -100,19 +142,14 @@ func (r *ModuleResolver) init() error {
// GO111MODULE=on. Other errors are fatal.
if err != nil {
if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") {
return err
return nil, err
}
}
}

if gmc := r.env.Env["GOMODCACHE"]; gmc != "" {
r.moduleCacheDir = gmc
} else {
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
return fmt.Errorf("empty GOPATH")
}
r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod")
r.moduleCacheDir = gomodcacheForEnv(goenv)
if r.moduleCacheDir == "" {
return nil, fmt.Errorf("cannot resolve GOMODCACHE")
}

sort.Slice(r.modsByModPath, func(i, j int) bool {

@@ -141,7 +178,11 @@ func (r *ModuleResolver) init() error {
} else {
addDep := func(mod *gocommand.ModuleJSON) {
if mod.Replace == nil {
// This is redundant with the cache, but we'll skip it cheaply enough.
// This is redundant with the cache, but we'll skip it cheaply enough
// when we encounter it in the module cache scan.
//
// Including it at a lower index in r.roots than the module cache dir
// helps prioritize matches from within existing dependencies.
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache})
} else {
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther})

@@ -158,24 +199,40 @@ func (r *ModuleResolver) init() error {
addDep(mod)
}
}
// If provided, share the moduleCacheCache.
//
// TODO(rfindley): The module cache is immutable. However, the loaded
// exports do depend on GOOS and GOARCH. Fortunately, the
// ProcessEnv.buildContext does not adjust these from build.DefaultContext
// (even though it should). So for now, this is OK to share, but we need to
// add logic for handling GOOS/GOARCH.
r.moduleCacheCache = moduleCacheCache
r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
}

r.scannedRoots = map[gopathwalk.Root]bool{}
if r.moduleCacheCache == nil {
r.moduleCacheCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
r.moduleCacheCache = NewDirInfoCache()
}
if r.otherCache == nil {
r.otherCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
r.otherCache = NewDirInfoCache()
return r, nil
}

// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env
// map, which must have GOMODCACHE and GOPATH populated.
//
// TODO(rfindley): this is defensive refactoring.
// 1. Is this even relevant anymore? Can't we just read GOMODCACHE.
// 2. Use this to separate module cache scanning from other scanning.
func gomodcacheForEnv(goenv map[string]string) string {
if gmc := goenv["GOMODCACHE"]; gmc != "" {
return gmc
}
r.initialized = true
return nil
gopaths := filepath.SplitList(goenv["GOPATH"])
if len(gopaths) == 0 {
return ""
}
return filepath.Join(gopaths[0], "/pkg/mod")
}

func (r *ModuleResolver) initAllMods() error {

@@ -206,30 +263,82 @@ func (r *ModuleResolver) initAllMods() error {
return nil
}

func (r *ModuleResolver) ClearForNewScan() {
<-r.scanSema
r.scannedRoots = map[gopathwalk.Root]bool{}
r.otherCache = &dirInfoCache{
dirs: map[string]*directoryPackageInfo{},
listeners: map[*int]cacheListener{},
}
r.scanSema <- struct{}{}
}
// ClearForNewScan invalidates the last scan.
//
// It preserves the set of roots, but forgets about the set of directories.
// Though it forgets the set of module cache directories, it remembers their
// contents, since they are assumed to be immutable.
func (r *ModuleResolver) ClearForNewScan() Resolver {
<-r.scanSema // acquire r, to guard scannedRoots
r2 := &ModuleResolver{
env: r.env,
dummyVendorMod: r.dummyVendorMod,
moduleCacheDir: r.moduleCacheDir,
roots: r.roots,
mains: r.mains,
mainByDir: r.mainByDir,
modsByModPath: r.modsByModPath,

func (r *ModuleResolver) ClearForNewMod() {
<-r.scanSema
*r = ModuleResolver{
env: r.env,
scanSema: make(chan struct{}, 1),
scannedRoots: make(map[gopathwalk.Root]bool),
otherCache: NewDirInfoCache(),
moduleCacheCache: r.moduleCacheCache,
otherCache: r.otherCache,
scanSema: r.scanSema,
}
r.init()
r.scanSema <- struct{}{}
r2.scanSema <- struct{}{} // r2 must start released
// Invalidate root scans. We don't need to invalidate module cache roots,
// because they are immutable.
// (We don't support a use case where GOMODCACHE is cleaned in the middle of
// e.g. a gopls session: the user must restart gopls to get accurate
// imports.)
//
// Scanning for new directories in GOMODCACHE should be handled elsewhere,
// via a call to ScanModuleCache.
for _, root := range r.roots {
if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] {
r2.scannedRoots[root] = true
}
}
r.scanSema <- struct{}{} // release r
return r2
}

// findPackage returns the module and directory that contains the package at
// the given import path, or returns nil, "" if no module is in scope.
// ClearModuleInfo invalidates resolver state that depends on go.mod file
// contents (essentially, the output of go list -m -json ...).
//
// Notably, it does not forget directory contents, which are reset
// asynchronously via ClearForNewScan.
//
// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op.
//
// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods.
func (e *ProcessEnv) ClearModuleInfo() {
if r, ok := e.resolver.(*ModuleResolver); ok {
|
||||
resolver, resolverErr := newModuleResolver(e, e.ModCache)
|
||||
if resolverErr == nil {
|
||||
<-r.scanSema // acquire (guards caches)
|
||||
resolver.moduleCacheCache = r.moduleCacheCache
|
||||
resolver.otherCache = r.otherCache
|
||||
r.scanSema <- struct{}{} // release
|
||||
}
|
||||
e.resolver = resolver
|
||||
e.resolverErr = resolverErr
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateResolver sets the resolver for the ProcessEnv to use in imports
|
||||
// operations. Only for use with the result of [Resolver.ClearForNewScan].
|
||||
//
|
||||
// TODO(rfindley): this awkward API is a result of the (arguably) inverted
|
||||
// relationship between configuration and state described in the doc comment
|
||||
// for [ProcessEnv].
|
||||
func (e *ProcessEnv) UpdateResolver(r Resolver) {
|
||||
e.resolver = r
|
||||
e.resolverErr = nil
|
||||
}
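
Taken together, the methods above form the intended refresh flow: derive a fresh resolver with ClearForNewScan (module-cache state is preserved), then install it with UpdateResolver. A package-internal sketch, under the assumption that ProcessEnv.GetResolver is available and that the Resolver interface now returns the fresh resolver, as the ModuleResolver implementation above does:

```go
// refreshScan is a hypothetical helper, not part of this change: it restarts
// directory scanning without discarding immutable module cache results.
func refreshScan(env *ProcessEnv) error {
	resolver, err := env.GetResolver()
	if err != nil {
		return err
	}
	fresh := resolver.ClearForNewScan() // module-cache roots stay marked as scanned
	env.UpdateResolver(fresh)           // later scans use the fresh resolver
	return nil
}
```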
|
||||
|
||||
// findPackage returns the module and directory from within the main modules
|
||||
// and their dependencies that contains the package at the given import path,
|
||||
// or returns nil, "" if no module is in scope.
|
||||
func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) {
|
||||
// This can't find packages in the stdlib, but that's harmless for all
|
||||
// the existing code paths.
|
||||
@@ -295,10 +404,6 @@ func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheKeys() []string {
|
||||
return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...)
|
||||
}
|
||||
|
||||
// cachePackageName caches the package name for a dir already in the cache.
|
||||
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
@@ -367,15 +472,15 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON
|
||||
return modDir != mod.Dir
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) {
|
||||
readModName := func(modFile string) string {
|
||||
modBytes, err := os.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
func readModName(modFile string) string {
|
||||
modBytes, err := os.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) {
|
||||
if r.dirInModuleCache(dir) {
|
||||
if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 {
|
||||
index := strings.Index(dir, matches[1]+"@"+matches[2])
|
||||
@@ -409,11 +514,9 @@ func (r *ModuleResolver) dirInModuleCache(dir string) bool {
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := map[string]string{}
|
||||
for _, path := range importPaths {
|
||||
// TODO(rfindley): shouldn't this use the dirInfoCache?
|
||||
_, packageDir := r.findPackage(path)
|
||||
if packageDir == "" {
|
||||
continue
|
||||
@@ -431,10 +534,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
ctx, done := event.Start(ctx, "imports.ModuleResolver.scan")
|
||||
defer done()
|
||||
|
||||
if err := r.init(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
processDir := func(info directoryPackageInfo) {
|
||||
// Skip this directory if we were not able to get the package information successfully.
|
||||
if scanned, err := info.reachedStatus(directoryScanned); !scanned || err != nil {
|
||||
@@ -444,18 +543,18 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.dirFound(pkg) {
|
||||
return
|
||||
}
|
||||
|
||||
pkg.packageName, err = r.cachePackageName(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.packageNameLoaded(pkg) {
|
||||
return
|
||||
}
|
||||
|
||||
_, exports, err := r.loadExports(ctx, pkg, false)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -494,7 +593,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
// Add anything new to the cache, and process it if we're still listening.
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
r.cacheStore(r.scanDirForPackage(root, dir))
|
||||
}
|
||||
@@ -509,9 +607,9 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-r.scanSema:
|
||||
case <-r.scanSema: // acquire
|
||||
}
|
||||
defer func() { r.scanSema <- struct{}{} }()
|
||||
defer func() { r.scanSema <- struct{}{} }() // release
|
||||
// We have the lock on r.scannedRoots, and no other scans can run.
|
||||
for _, root := range roots {
|
||||
if ctx.Err() != nil {
|
||||
@@ -613,9 +711,6 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
|
||||
return r.cacheExports(ctx, r.env, info)
|
||||
}
|
||||
|
||||
116
vendor/golang.org/x/tools/internal/imports/mod_cache.go
generated
vendored
@@ -7,8 +7,12 @@ package imports
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
)
|
||||
|
||||
@@ -39,6 +43,8 @@ const (
|
||||
exportsLoaded
|
||||
)
|
||||
|
||||
// directoryPackageInfo holds (possibly incomplete) information about packages
|
||||
// contained in a given directory.
|
||||
type directoryPackageInfo struct {
|
||||
// status indicates the extent to which this struct has been filled in.
|
||||
status directoryPackageStatus
|
||||
@@ -63,7 +69,10 @@ type directoryPackageInfo struct {
|
||||
packageName string // the package name, as declared in the source.
|
||||
|
||||
// Set when status >= exportsLoaded.
|
||||
|
||||
// TODO(rfindley): it's hard to see this, but exports depend implicitly on
|
||||
// the default build context GOOS and GOARCH.
|
||||
//
|
||||
// We can make this explicit, and key exports by GOOS, GOARCH.
|
||||
exports []string
|
||||
}
|
||||
|
||||
@@ -79,7 +88,7 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// dirInfoCache is a concurrency safe map for storing information about
|
||||
// DirInfoCache is a concurrency-safe map for storing information about
|
||||
// directories that may contain packages.
|
||||
//
|
||||
// The information in this cache is built incrementally. Entries are initialized in scan.
|
||||
@@ -92,21 +101,26 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
|
||||
// The information in the cache is not expected to change for the cache's
|
||||
// lifetime, so there is no protection against competing writes. Users should
|
||||
// take care not to hold the cache across changes to the underlying files.
|
||||
//
|
||||
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
|
||||
type dirInfoCache struct {
|
||||
type DirInfoCache struct {
|
||||
mu sync.Mutex
|
||||
// dirs stores information about packages in directories, keyed by absolute path.
|
||||
dirs map[string]*directoryPackageInfo
|
||||
listeners map[*int]cacheListener
|
||||
}
|
||||
|
||||
func NewDirInfoCache() *DirInfoCache {
|
||||
return &DirInfoCache{
|
||||
dirs: make(map[string]*directoryPackageInfo),
|
||||
listeners: make(map[*int]cacheListener),
|
||||
}
|
||||
}
|
||||
|
||||
type cacheListener func(directoryPackageInfo)
|
||||
|
||||
// ScanAndListen calls listener on all the items in the cache, and on anything
|
||||
// newly added. The returned stop function waits for all in-flight callbacks to
|
||||
// finish and blocks new ones.
|
||||
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
|
||||
func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
|
||||
// Flushing out all the callbacks is tricky without knowing how many there
|
||||
@@ -162,8 +176,10 @@ func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener
|
||||
}
|
||||
|
||||
// Store stores the package info for dir.
|
||||
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
d.mu.Lock()
|
||||
// TODO(rfindley, golang/go#59216): should we overwrite an existing entry?
|
||||
// That seems incorrect as the cache should be idempotent.
|
||||
_, old := d.dirs[dir]
|
||||
d.dirs[dir] = &info
|
||||
var listeners []cacheListener
|
||||
@@ -180,7 +196,7 @@ func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
|
||||
}
|
||||
|
||||
// Load returns a copy of the directoryPackageInfo for absolute directory dir.
|
||||
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
info, ok := d.dirs[dir]
|
||||
@@ -191,7 +207,7 @@ func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
|
||||
}
|
||||
|
||||
// Keys returns the keys currently present in d.
|
||||
func (d *dirInfoCache) Keys() (keys []string) {
|
||||
func (d *DirInfoCache) Keys() (keys []string) {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
for key := range d.dirs {
|
||||
@@ -200,7 +216,7 @@ func (d *dirInfoCache) Keys() (keys []string) {
|
||||
return keys
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
|
||||
func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if loaded, err := info.reachedStatus(nameLoaded); loaded {
|
||||
return info.packageName, err
|
||||
}
|
||||
@@ -213,7 +229,7 @@ func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro
|
||||
return info.packageName, info.err
|
||||
}
|
||||
|
||||
func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
if reached, _ := info.reachedStatus(exportsLoaded); reached {
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
@@ -234,3 +250,81 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d
|
||||
d.Store(info.dir, info)
|
||||
return info.packageName, info.exports, info.err
|
||||
}
|
||||
|
||||
// ScanModuleCache walks the given directory, which must be a GOMODCACHE value,
|
||||
// for directory package information, storing the results in cache.
|
||||
func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) {
|
||||
// Note(rfindley): it's hard to see, but this function attempts to implement
|
||||
// just the side effects on cache of calling PrimeCache with a ProcessEnv
|
||||
// that has the given dir as its GOMODCACHE.
|
||||
//
|
||||
// Teasing out the control flow, we see that we can avoid any handling of
|
||||
// vendor/ and can infer module info entirely from the path, simplifying the
|
||||
// logic here.
|
||||
|
||||
root := gopathwalk.Root{
|
||||
Path: filepath.Clean(dir),
|
||||
Type: gopathwalk.RootModuleCache,
|
||||
}
|
||||
|
||||
directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo {
|
||||
// This is a copy of ModuleResolver.scanDirForPackage, trimmed down to
|
||||
// logic that applies to a module cache directory.
|
||||
|
||||
subdir := ""
|
||||
if dir != root.Path {
|
||||
subdir = dir[len(root.Path)+len("/"):]
|
||||
}
|
||||
|
||||
matches := modCacheRegexp.FindStringSubmatch(subdir)
|
||||
if len(matches) == 0 {
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("invalid module cache path: %v", subdir),
|
||||
}
|
||||
}
|
||||
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
|
||||
if err != nil {
|
||||
if logf != nil {
|
||||
logf("decoding module cache path %q: %v", subdir, err)
|
||||
}
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
|
||||
}
|
||||
}
|
||||
importPath := path.Join(modPath, filepath.ToSlash(matches[3]))
|
||||
index := strings.Index(dir, matches[1]+"@"+matches[2])
|
||||
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
|
||||
modName := readModName(filepath.Join(modDir, "go.mod"))
|
||||
return directoryPackageInfo{
|
||||
status: directoryScanned,
|
||||
dir: dir,
|
||||
rootType: root.Type,
|
||||
nonCanonicalImportPath: importPath,
|
||||
moduleDir: modDir,
|
||||
moduleName: modName,
|
||||
}
|
||||
}
|
||||
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
info := directoryInfo(root, dir)
|
||||
cache.Store(info.dir, info)
|
||||
}
|
||||
|
||||
skip := func(_ gopathwalk.Root, dir string) bool {
|
||||
// Skip directories that have already been scanned.
|
||||
//
|
||||
// Note that gopathwalk only adds "package" directories, which must contain
|
||||
// a .go file, and all such package directories in the module cache are
|
||||
// immutable. So if we can load a dir, it can be skipped.
|
||||
info, ok := cache.Load(dir)
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
packageScanned, _ := info.reachedStatus(directoryScanned)
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true})
|
||||
}
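
Because package directories in the module cache are immutable, the expected pattern is to prime one DirInfoCache up front and share it across resolvers (the diff above passes it around as ProcessEnv.ModCache). A package-internal sketch; the helper name is made up:

```go
// primeModCache is a hypothetical helper: walk a GOMODCACHE directory once and
// return a cache that later module resolvers can reuse.
func primeModCache(gomodcache string, logf func(string, ...any)) *DirInfoCache {
	cache := NewDirInfoCache()
	if gomodcache != "" {
		// Entries are keyed by absolute directory and never invalidated.
		ScanModuleCache(gomodcache, cache, logf)
	}
	return cache
}
```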
|
||||
|
||||
61
vendor/golang.org/x/tools/internal/imports/zstdlib.go
generated
vendored
@@ -151,6 +151,7 @@ var stdlib = map[string][]string{
|
||||
"cmp": {
|
||||
"Compare",
|
||||
"Less",
|
||||
"Or",
|
||||
"Ordered",
|
||||
},
|
||||
"compress/bzip2": {
|
||||
@@ -632,6 +633,8 @@ var stdlib = map[string][]string{
|
||||
"NameMismatch",
|
||||
"NewCertPool",
|
||||
"NotAuthorizedToSign",
|
||||
"OID",
|
||||
"OIDFromInts",
|
||||
"PEMCipher",
|
||||
"PEMCipher3DES",
|
||||
"PEMCipherAES128",
|
||||
@@ -706,6 +709,7 @@ var stdlib = map[string][]string{
|
||||
"LevelWriteCommitted",
|
||||
"Named",
|
||||
"NamedArg",
|
||||
"Null",
|
||||
"NullBool",
|
||||
"NullByte",
|
||||
"NullFloat64",
|
||||
@@ -1921,6 +1925,7 @@ var stdlib = map[string][]string{
|
||||
"R_LARCH_32",
|
||||
"R_LARCH_32_PCREL",
|
||||
"R_LARCH_64",
|
||||
"R_LARCH_64_PCREL",
|
||||
"R_LARCH_ABS64_HI12",
|
||||
"R_LARCH_ABS64_LO20",
|
||||
"R_LARCH_ABS_HI20",
|
||||
@@ -1928,12 +1933,17 @@ var stdlib = map[string][]string{
|
||||
"R_LARCH_ADD16",
|
||||
"R_LARCH_ADD24",
|
||||
"R_LARCH_ADD32",
|
||||
"R_LARCH_ADD6",
|
||||
"R_LARCH_ADD64",
|
||||
"R_LARCH_ADD8",
|
||||
"R_LARCH_ADD_ULEB128",
|
||||
"R_LARCH_ALIGN",
|
||||
"R_LARCH_B16",
|
||||
"R_LARCH_B21",
|
||||
"R_LARCH_B26",
|
||||
"R_LARCH_CFA",
|
||||
"R_LARCH_COPY",
|
||||
"R_LARCH_DELETE",
|
||||
"R_LARCH_GNU_VTENTRY",
|
||||
"R_LARCH_GNU_VTINHERIT",
|
||||
"R_LARCH_GOT64_HI12",
|
||||
@@ -1953,6 +1963,7 @@ var stdlib = map[string][]string{
|
||||
"R_LARCH_PCALA64_LO20",
|
||||
"R_LARCH_PCALA_HI20",
|
||||
"R_LARCH_PCALA_LO12",
|
||||
"R_LARCH_PCREL20_S2",
|
||||
"R_LARCH_RELATIVE",
|
||||
"R_LARCH_RELAX",
|
||||
"R_LARCH_SOP_ADD",
|
||||
@@ -1983,8 +1994,10 @@ var stdlib = map[string][]string{
|
||||
"R_LARCH_SUB16",
|
||||
"R_LARCH_SUB24",
|
||||
"R_LARCH_SUB32",
|
||||
"R_LARCH_SUB6",
|
||||
"R_LARCH_SUB64",
|
||||
"R_LARCH_SUB8",
|
||||
"R_LARCH_SUB_ULEB128",
|
||||
"R_LARCH_TLS_DTPMOD32",
|
||||
"R_LARCH_TLS_DTPMOD64",
|
||||
"R_LARCH_TLS_DTPREL32",
|
||||
@@ -2035,6 +2048,7 @@ var stdlib = map[string][]string{
|
||||
"R_MIPS_LO16",
|
||||
"R_MIPS_NONE",
|
||||
"R_MIPS_PC16",
|
||||
"R_MIPS_PC32",
|
||||
"R_MIPS_PJUMP",
|
||||
"R_MIPS_REL16",
|
||||
"R_MIPS_REL32",
|
||||
@@ -2952,6 +2966,8 @@ var stdlib = map[string][]string{
|
||||
"RegisterName",
|
||||
},
|
||||
"encoding/hex": {
|
||||
"AppendDecode",
|
||||
"AppendEncode",
|
||||
"Decode",
|
||||
"DecodeString",
|
||||
"DecodedLen",
|
||||
@@ -3233,6 +3249,7 @@ var stdlib = map[string][]string{
|
||||
"TypeSpec",
|
||||
"TypeSwitchStmt",
|
||||
"UnaryExpr",
|
||||
"Unparen",
|
||||
"ValueSpec",
|
||||
"Var",
|
||||
"Visitor",
|
||||
@@ -3492,6 +3509,7 @@ var stdlib = map[string][]string{
|
||||
"XOR_ASSIGN",
|
||||
},
|
||||
"go/types": {
|
||||
"Alias",
|
||||
"ArgumentError",
|
||||
"Array",
|
||||
"AssertableTo",
|
||||
@@ -3559,6 +3577,7 @@ var stdlib = map[string][]string{
|
||||
"MethodVal",
|
||||
"MissingMethod",
|
||||
"Named",
|
||||
"NewAlias",
|
||||
"NewArray",
|
||||
"NewChan",
|
||||
"NewChecker",
|
||||
@@ -3627,6 +3646,7 @@ var stdlib = map[string][]string{
|
||||
"Uint64",
|
||||
"Uint8",
|
||||
"Uintptr",
|
||||
"Unalias",
|
||||
"Union",
|
||||
"Universe",
|
||||
"Unsafe",
|
||||
@@ -3643,6 +3663,11 @@ var stdlib = map[string][]string{
|
||||
"WriteSignature",
|
||||
"WriteType",
|
||||
},
|
||||
"go/version": {
|
||||
"Compare",
|
||||
"IsValid",
|
||||
"Lang",
|
||||
},
|
||||
"hash": {
|
||||
"Hash",
|
||||
"Hash32",
|
||||
@@ -4078,6 +4103,7 @@ var stdlib = map[string][]string{
|
||||
"NewTextHandler",
|
||||
"Record",
|
||||
"SetDefault",
|
||||
"SetLogLoggerLevel",
|
||||
"Source",
|
||||
"SourceKey",
|
||||
"String",
|
||||
@@ -4367,6 +4393,35 @@ var stdlib = map[string][]string{
|
||||
"Uint64",
|
||||
"Zipf",
|
||||
},
|
||||
"math/rand/v2": {
|
||||
"ChaCha8",
|
||||
"ExpFloat64",
|
||||
"Float32",
|
||||
"Float64",
|
||||
"Int",
|
||||
"Int32",
|
||||
"Int32N",
|
||||
"Int64",
|
||||
"Int64N",
|
||||
"IntN",
|
||||
"N",
|
||||
"New",
|
||||
"NewChaCha8",
|
||||
"NewPCG",
|
||||
"NewZipf",
|
||||
"NormFloat64",
|
||||
"PCG",
|
||||
"Perm",
|
||||
"Rand",
|
||||
"Shuffle",
|
||||
"Source",
|
||||
"Uint32",
|
||||
"Uint32N",
|
||||
"Uint64",
|
||||
"Uint64N",
|
||||
"UintN",
|
||||
"Zipf",
|
||||
},
|
||||
"mime": {
|
||||
"AddExtensionType",
|
||||
"BEncoding",
|
||||
@@ -4540,6 +4595,7 @@ var stdlib = map[string][]string{
|
||||
"FS",
|
||||
"File",
|
||||
"FileServer",
|
||||
"FileServerFS",
|
||||
"FileSystem",
|
||||
"Flusher",
|
||||
"Get",
|
||||
@@ -4566,6 +4622,7 @@ var stdlib = map[string][]string{
|
||||
"MethodPut",
|
||||
"MethodTrace",
|
||||
"NewFileTransport",
|
||||
"NewFileTransportFS",
|
||||
"NewRequest",
|
||||
"NewRequestWithContext",
|
||||
"NewResponseController",
|
||||
@@ -4599,6 +4656,7 @@ var stdlib = map[string][]string{
|
||||
"Serve",
|
||||
"ServeContent",
|
||||
"ServeFile",
|
||||
"ServeFileFS",
|
||||
"ServeMux",
|
||||
"ServeTLS",
|
||||
"Server",
|
||||
@@ -5106,6 +5164,7 @@ var stdlib = map[string][]string{
|
||||
"StructTag",
|
||||
"Swapper",
|
||||
"Type",
|
||||
"TypeFor",
|
||||
"TypeOf",
|
||||
"Uint",
|
||||
"Uint16",
|
||||
@@ -5342,6 +5401,7 @@ var stdlib = map[string][]string{
|
||||
"CompactFunc",
|
||||
"Compare",
|
||||
"CompareFunc",
|
||||
"Concat",
|
||||
"Contains",
|
||||
"ContainsFunc",
|
||||
"Delete",
|
||||
@@ -10824,6 +10884,7 @@ var stdlib = map[string][]string{
|
||||
"Value",
|
||||
},
|
||||
"testing/slogtest": {
|
||||
"Run",
|
||||
"TestHandler",
|
||||
},
|
||||
"text/scanner": {
|
||||
|
||||
@@ -2,11 +2,14 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.22
|
||||
// +build !go1.22
|
||||
|
||||
package versions
|
||||
|
||||
// Note: If we use build tags to use go/versions when go >=1.22,
|
||||
// we run into go.dev/issue/53737. Under some operations users would see an
|
||||
// import of "go/versions" even if they would not compile the file.
|
||||
// For example, during `go get -u ./...` (go.dev/issue/64490) we do not try to include
|
||||
// For this reason, this library is just a clone of go/versions for the moment.
|
||||
|
||||
// Lang returns the Go language version for version x.
|
||||
// If x is not a valid version, Lang returns the empty string.
|
||||
// For example:
|
||||
38
vendor/golang.org/x/tools/internal/versions/versions_go122.go
generated
vendored
@@ -1,38 +0,0 @@
|
||||
// Copyright 2023 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.22
|
||||
// +build go1.22
|
||||
|
||||
package versions
|
||||
|
||||
import (
|
||||
"go/version"
|
||||
)
|
||||
|
||||
// Lang returns the Go language version for version x.
|
||||
// If x is not a valid version, Lang returns the empty string.
|
||||
// For example:
|
||||
//
|
||||
// Lang("go1.21rc2") = "go1.21"
|
||||
// Lang("go1.21.2") = "go1.21"
|
||||
// Lang("go1.21") = "go1.21"
|
||||
// Lang("go1") = "go1"
|
||||
// Lang("bad") = ""
|
||||
// Lang("1.21") = ""
|
||||
func Lang(x string) string { return version.Lang(x) }
|
||||
|
||||
// Compare returns -1, 0, or +1 depending on whether
|
||||
// x < y, x == y, or x > y, interpreted as Go versions.
|
||||
// The versions x and y must begin with a "go" prefix: "go1.21" not "1.21".
|
||||
// Invalid versions, including the empty string, compare less than
|
||||
// valid versions and equal to each other.
|
||||
// The language version "go1.21" compares less than the
|
||||
// release candidate and eventual releases "go1.21rc1" and "go1.21.0".
|
||||
// Custom toolchain suffixes are ignored during comparison:
|
||||
// "go1.21.0" and "go1.21.0-bigcorp" are equal.
|
||||
func Compare(x, y string) int { return version.Compare(x, y) }
|
||||
|
||||
// IsValid reports whether the version x is valid.
|
||||
func IsValid(x string) bool { return version.IsValid(x) }
|
||||
202
vendor/k8s.io/gengo/v2/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,202 @@
|
||||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2014 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
53
vendor/k8s.io/gengo/v2/README.md
generated
vendored
Normal file
@@ -0,0 +1,53 @@
[![GoDoc Widget]][GoDoc] [![GoReport]][GoReportStatus]

[GoDoc]: https://godoc.org/k8s.io/gengo
[GoDoc Widget]: https://godoc.org/k8s.io/gengo?status.svg
[GoReport]: https://goreportcard.com/badge/github.com/kubernetes/gengo
[GoReportStatus]: https://goreportcard.com/report/github.com/kubernetes/gengo

# Gengo: a framework for building simple code generators

This repo is used by Kubernetes to build some codegen tooling. It is not
intended to be general-purpose and makes some assumptions that may not hold
outside of Kubernetes.

In the past this repo was partially supported for external use (outside of the
Kubernetes project overall), but that is no longer true. We may change the API
in incompatible ways, without warning.

If you are not building something that is part of Kubernetes, DO NOT DEPEND ON
THIS REPO.

## New usage within Kubernetes

Gengo is a very opinionated framework. It is primarily aimed at generating Go
code derived from types defined in other Go code, but it is possible to use it
for other things (e.g. proto files). Net new tools should consider using
`golang.org/x/tools/go/packages` directly. Gengo can serve as an example of
how to do that.

If you still decide you want to use gengo, see the
[simple examples](./examples) in this repo or the more extensive tools in the
Kubernetes [code-generator](https://github.com/kubernetes/code-generator/)
repo.

## Overview

Gengo is used to build tools (generally a tool is a binary). Each tool
describes some number of `Targets`. A target is a single output package, which
may be the same as the inputs (if the tool generates code alongside the inputs)
or different. Each `Target` describes some number of `Generators`. A
generator is responsible for emitting a single file into the target directory.

Gengo helps the tool to load and process input packages, e.g. extracting type
information and associating comments. Each target will be offered every known
type, and can filter that down to the set of types it cares about. Each
generator will be offered the result of the target's filtering, and can filter
the set of types further. Finally, the generator will be called to emit code
for all of the remaining types.

The `tracer` example in this repo can be used to examine all of the hooks.

## Contributing

Please see [CONTRIBUTING.md](CONTRIBUTING.md) for instructions on how to contribute.
83
vendor/k8s.io/gengo/v2/comments.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package gengo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// ExtractCommentTags parses comments for lines of the form:
|
||||
//
|
||||
// 'marker' + "key=value".
|
||||
//
|
||||
// Values are optional; "" is the default. A tag can be specified more than
|
||||
// one time and all values are returned. If the resulting map has an entry for
|
||||
// a key, the value (a slice) is guaranteed to have at least 1 element.
|
||||
//
|
||||
// Example: if you pass "+" for 'marker', and the following lines are in
|
||||
// the comments:
|
||||
//
|
||||
// +foo=value1
|
||||
// +bar
|
||||
// +foo=value2
|
||||
// +baz="qux"
|
||||
//
|
||||
// Then this function will return:
|
||||
//
|
||||
// map[string][]string{"foo": {"value1", "value2"}, "bar": {""}, "baz": {"qux"}}
|
||||
func ExtractCommentTags(marker string, lines []string) map[string][]string {
|
||||
out := map[string][]string{}
|
||||
for _, line := range lines {
|
||||
line = strings.Trim(line, " ")
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
if !strings.HasPrefix(line, marker) {
|
||||
continue
|
||||
}
|
||||
// TODO: we could support multiple values per key if we split on spaces
|
||||
kv := strings.SplitN(line[len(marker):], "=", 2)
|
||||
if len(kv) == 2 {
|
||||
out[kv[0]] = append(out[kv[0]], kv[1])
|
||||
} else if len(kv) == 1 {
|
||||
out[kv[0]] = append(out[kv[0]], "")
|
||||
}
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// ExtractSingleBoolCommentTag parses comments for lines of the form:
|
||||
//
|
||||
// 'marker' + "key=value1"
|
||||
//
|
||||
// If the tag is not found, the default value is returned. Values are asserted
|
||||
// to be boolean ("true" or "false"), and any other value will cause an error
|
||||
// to be returned. If the key has multiple values, the first one will be used.
|
||||
func ExtractSingleBoolCommentTag(marker string, key string, defaultVal bool, lines []string) (bool, error) {
|
||||
values := ExtractCommentTags(marker, lines)[key]
|
||||
if values == nil {
|
||||
return defaultVal, nil
|
||||
}
|
||||
if values[0] == "true" {
|
||||
return true, nil
|
||||
}
|
||||
if values[0] == "false" {
|
||||
return false, nil
|
||||
}
|
||||
return false, fmt.Errorf("tag value for %q is not boolean: %q", key, values[0])
|
||||
}
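
For illustration, a small self-contained program exercising both helpers; the tag names are made up:

```go
package main

import (
	"fmt"

	"k8s.io/gengo/v2"
)

func main() {
	lines := []string{
		"+enabled=true",   // boolean tag
		"+groupName=apps", // repeated tag: all values are kept, in order
		"+groupName=batch",
	}

	tags := gengo.ExtractCommentTags("+", lines)
	fmt.Println(tags["groupName"]) // [apps batch]

	enabled, err := gengo.ExtractSingleBoolCommentTag("+", "enabled", false, lines)
	fmt.Println(enabled, err) // true <nil>
}
```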
|
||||
98
vendor/k8s.io/gengo/v2/execute.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package gengo is a code-generation framework.
|
||||
package gengo
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/gengo/v2/generator"
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/parser"
|
||||
)
|
||||
|
||||
// StdBuildTag is a suggested build-tag which tools can use both as an argument
|
||||
// to GoBoilerplate and to Execute.
|
||||
const StdBuildTag = "ignore_autogenerated"
|
||||
|
||||
// StdGeneratedBy is a suggested "generated by" line which tools can use as an
|
||||
// argument to GoBoilerplate.
|
||||
const StdGeneratedBy = "// Code generated by GENERATOR_NAME. DO NOT EDIT."
|
||||
|
||||
// GoBoilerplate returns the Go file header:
|
||||
// - an optional build tag (negative, set it to ignore generated code)
|
||||
// - an optional boilerplate file
|
||||
// - an optional "generated by" comment
|
||||
func GoBoilerplate(headerFile, buildTag, generatedBy string) ([]byte, error) {
|
||||
buf := bytes.Buffer{}
|
||||
|
||||
if buildTag != "" {
|
||||
buf.WriteString(
|
||||
fmt.Sprintf("//go:build !%s\n// +build !%s\n\n", buildTag, buildTag))
|
||||
}
|
||||
|
||||
if headerFile != "" {
|
||||
b, err := os.ReadFile(headerFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = bytes.ReplaceAll(b, []byte("YEAR"), []byte(strconv.Itoa(time.Now().UTC().Year())))
|
||||
buf.Write(b)
|
||||
buf.WriteByte('\n')
|
||||
}
|
||||
|
||||
if generatedBy != "" {
|
||||
generatorName := filepath.Base(os.Args[0])
|
||||
// Strip the extension from the name to normalize output between *nix and Windows.
|
||||
generatorName = generatorName[:len(generatorName)-len(filepath.Ext(generatorName))]
|
||||
generatedByComment := strings.ReplaceAll(generatedBy, "GENERATOR_NAME", generatorName)
|
||||
buf.WriteString(fmt.Sprintf("%s\n\n", generatedByComment))
|
||||
}
|
||||
|
||||
return buf.Bytes(), nil
|
||||
}
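
A minimal sketch of what GoBoilerplate produces; passing "" for the header file skips the license block, so only the build tag and the "generated by" line appear:

```go
package main

import (
	"fmt"
	"log"

	"k8s.io/gengo/v2"
)

func main() {
	hdr, err := gengo.GoBoilerplate("", gengo.StdBuildTag, gengo.StdGeneratedBy)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s", hdr)
	// Expected shape of the output (tool name comes from os.Args[0]):
	//   //go:build !ignore_autogenerated
	//   // +build !ignore_autogenerated
	//
	//   // Code generated by <tool>. DO NOT EDIT.
}
```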
|
||||
|
||||
// Execute implements most of a tool's main loop.
|
||||
func Execute(nameSystems namer.NameSystems, defaultSystem string, getTargets func(*generator.Context) []generator.Target, buildTag string, patterns []string) error {
|
||||
var buildTags []string
|
||||
if buildTag != "" {
|
||||
buildTags = append(buildTags, buildTag)
|
||||
}
|
||||
|
||||
p := parser.NewWithOptions(parser.Options{BuildTags: buildTags})
|
||||
if err := p.LoadPackages(patterns...); err != nil {
|
||||
return fmt.Errorf("failed making a parser: %v", err)
|
||||
}
|
||||
|
||||
c, err := generator.NewContext(p, nameSystems, defaultSystem)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed making a context: %v", err)
|
||||
}
|
||||
|
||||
targets := getTargets(c)
|
||||
if err := c.ExecuteTargets(targets); err != nil {
|
||||
return fmt.Errorf("failed executing generator: %v", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
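
A hedged skeleton of a tool's main package built around Execute; the "raw" name system and the empty getTargets are placeholders that a real generator would replace:

```go
package main

import (
	"log"

	"k8s.io/gengo/v2"
	"k8s.io/gengo/v2/generator"
	"k8s.io/gengo/v2/namer"
)

func main() {
	// Placeholder name system; real tools usually register several.
	nameSystems := namer.NameSystems{"raw": namer.NewRawNamer("", nil)}

	// Placeholder: a real tool returns one generator.Target per output package.
	getTargets := func(c *generator.Context) []generator.Target {
		return nil
	}

	if err := gengo.Execute(nameSystems, "raw", getTargets, gengo.StdBuildTag, []string{"./..."}); err != nil {
		log.Fatalf("generation failed: %v", err)
	}
}
```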
|
||||
31
vendor/k8s.io/gengo/v2/generator/doc.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package generator defines an interface for code generators to implement.
|
||||
//
|
||||
// To use this package, you'll implement the "Package" and "Generator"
|
||||
// interfaces; you'll call NewContext to load up the types you want to work
|
||||
// with, and then you'll call one or more of the Execute methods. See the
|
||||
// interface definitions for explanations. All output will have gofmt called on
|
||||
// it automatically, so you do not need to worry about generating correct
|
||||
// indentation.
|
||||
//
|
||||
// This package also exposes SnippetWriter. SnippetWriter reduces to a minimum
|
||||
// the boilerplate involved in setting up a template from go's text/template
|
||||
// package. Additionally, all naming systems in the Context will be added as
|
||||
// functions to the parsed template, so that they can be called directly from
|
||||
// your templates!
|
||||
package generator // import "k8s.io/gengo/v2/generator"
|
||||
50
vendor/k8s.io/gengo/v2/generator/error_tracker.go
generated
vendored
Normal file
@@ -0,0 +1,50 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// ErrorTracker tracks errors to the underlying writer, so that you can ignore
|
||||
// them until you're ready to return.
|
||||
type ErrorTracker struct {
|
||||
io.Writer
|
||||
err error
|
||||
}
|
||||
|
||||
// NewErrorTracker makes a new error tracker; note that it implements io.Writer.
|
||||
func NewErrorTracker(w io.Writer) *ErrorTracker {
|
||||
return &ErrorTracker{Writer: w}
|
||||
}
|
||||
|
||||
// Write intercepts calls to Write.
|
||||
func (et *ErrorTracker) Write(p []byte) (n int, err error) {
|
||||
if et.err != nil {
|
||||
return 0, et.err
|
||||
}
|
||||
n, err = et.Writer.Write(p)
|
||||
if err != nil {
|
||||
et.err = err
|
||||
}
|
||||
return n, err
|
||||
}
|
||||
|
||||
// Error returns nil if no error has occurred, otherwise it returns the error.
|
||||
func (et *ErrorTracker) Error() error {
|
||||
return et.err
|
||||
}
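
A short usage sketch: the tracker lets a generator write many chunks and defer error handling to a single check at the end.

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"k8s.io/gengo/v2/generator"
)

func main() {
	var buf bytes.Buffer
	et := generator.NewErrorTracker(&buf)
	fmt.Fprintln(et, "package demo") // write errors are remembered, not returned here
	fmt.Fprintln(et, "var X = 1")
	if err := et.Error(); err != nil { // single check at the end
		log.Fatal(err)
	}
	fmt.Print(buf.String())
}
```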
|
||||
266
vendor/k8s.io/gengo/v2/generator/execute.go
generated
vendored
Normal file
@@ -0,0 +1,266 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/imports"
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/types"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// ExecuteTargets runs the generators for the provided targets.
|
||||
func (c *Context) ExecuteTargets(targets []Target) error {
|
||||
klog.V(5).Infof("ExecuteTargets: %d targets", len(targets))
|
||||
|
||||
var errs []error
|
||||
for _, tgt := range targets {
|
||||
if err := c.ExecuteTarget(tgt); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("some targets had errors: %w", errors.Join(errs...))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type DefaultFileType struct {
|
||||
Format func([]byte) ([]byte, error)
|
||||
Assemble func(io.Writer, *File)
|
||||
}
|
||||
|
||||
func (ft DefaultFileType) AssembleFile(f *File, pathname string) error {
|
||||
klog.V(5).Infof("Assembling file %q", pathname)
|
||||
|
||||
destFile, err := os.Create(pathname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer destFile.Close()
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
et := NewErrorTracker(b)
|
||||
ft.Assemble(et, f)
|
||||
if et.Error() != nil {
|
||||
return et.Error()
|
||||
}
|
||||
if formatted, err := ft.Format(b.Bytes()); err != nil {
|
||||
err = fmt.Errorf("unable to format file %q (%v)", pathname, err)
|
||||
// Write the file anyway, so they can see what's going wrong and fix the generator.
|
||||
if _, err2 := destFile.Write(b.Bytes()); err2 != nil {
|
||||
return err2
|
||||
}
|
||||
return err
|
||||
} else {
|
||||
_, err = destFile.Write(formatted)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func assembleGoFile(w io.Writer, f *File) {
|
||||
w.Write(f.Header)
|
||||
fmt.Fprintf(w, "package %v\n\n", f.PackageName)
|
||||
|
||||
if len(f.Imports) > 0 {
|
||||
fmt.Fprint(w, "import (\n")
|
||||
for i := range f.Imports {
|
||||
if strings.Contains(i, "\"") {
|
||||
// they included quotes, or are using the
|
||||
// `name "path/to/pkg"` format.
|
||||
fmt.Fprintf(w, "\t%s\n", i)
|
||||
} else {
|
||||
fmt.Fprintf(w, "\t%q\n", i)
|
||||
}
|
||||
}
|
||||
fmt.Fprint(w, ")\n\n")
|
||||
}
|
||||
|
||||
if f.Vars.Len() > 0 {
|
||||
fmt.Fprint(w, "var (\n")
|
||||
w.Write(f.Vars.Bytes())
|
||||
fmt.Fprint(w, ")\n\n")
|
||||
}
|
||||
|
||||
if f.Consts.Len() > 0 {
|
||||
fmt.Fprint(w, "const (\n")
|
||||
w.Write(f.Consts.Bytes())
|
||||
fmt.Fprint(w, ")\n\n")
|
||||
}
|
||||
|
||||
w.Write(f.Body.Bytes())
|
||||
}
|
||||
|
||||
func importsWrapper(src []byte) ([]byte, error) {
|
||||
return imports.Process("", src, nil)
|
||||
}
|
||||
|
||||
func NewGoFile() *DefaultFileType {
|
||||
return &DefaultFileType{
|
||||
Format: importsWrapper,
|
||||
Assemble: assembleGoFile,
|
||||
}
|
||||
}
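
To show how the pieces fit, here is a hedged, hand-built example that assembles a one-function file with NewGoFile; real tools let ExecuteTarget populate the File, and the output path here is just a temp location:

```go
package main

import (
	"fmt"
	"log"
	"os"
	"path/filepath"

	"k8s.io/gengo/v2/generator"
)

func main() {
	f := &generator.File{
		Name:        "zz_generated_demo.go",
		PackageName: "demo",
		Header:      []byte("// Code generated by demo-gen. DO NOT EDIT.\n\n"),
		Imports:     map[string]struct{}{"fmt": {}},
	}
	fmt.Fprintln(&f.Body, `func Hello() { fmt.Println("hello") }`)

	out := filepath.Join(os.TempDir(), f.Name)
	if err := generator.NewGoFile().AssembleFile(f, out); err != nil {
		log.Fatalf("assemble: %v", err)
	}
	fmt.Println("wrote", out) // goimports has formatted the result
}
```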
|
||||
|
||||
// format should be one line only, and not end with \n.
|
||||
func addIndentHeaderComment(b *bytes.Buffer, format string, args ...interface{}) {
|
||||
if b.Len() > 0 {
|
||||
fmt.Fprintf(b, "\n// "+format+"\n", args...)
|
||||
} else {
|
||||
fmt.Fprintf(b, "// "+format+"\n", args...)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Context) filteredBy(f func(*Context, *types.Type) bool) *Context {
|
||||
c2 := *c
|
||||
c2.Order = []*types.Type{}
|
||||
for _, t := range c.Order {
|
||||
if f(c, t) {
|
||||
c2.Order = append(c2.Order, t)
|
||||
}
|
||||
}
|
||||
return &c2
|
||||
}
|
||||
|
||||
// make a new context; inherit c.Namers, but add on 'namers'. In case of a name
|
||||
// collision, the namer in 'namers' wins.
|
||||
func (c *Context) addNameSystems(namers namer.NameSystems) *Context {
|
||||
if namers == nil {
|
||||
return c
|
||||
}
|
||||
c2 := *c
|
||||
// Copy the existing name systems so we don't corrupt a parent context
|
||||
c2.Namers = namer.NameSystems{}
|
||||
for k, v := range c.Namers {
|
||||
c2.Namers[k] = v
|
||||
}
|
||||
|
||||
for name, namer := range namers {
|
||||
c2.Namers[name] = namer
|
||||
}
|
||||
return &c2
|
||||
}
|
||||
|
||||
// ExecuteTarget runs the generators for a single target.
|
||||
func (c *Context) ExecuteTarget(tgt Target) error {
|
||||
tgtDir := tgt.Dir()
|
||||
if tgtDir == "" {
|
||||
return fmt.Errorf("no directory for target %s", tgt.Path())
|
||||
}
|
||||
klog.V(5).Infof("Executing target %q (%q)", tgt.Name(), tgtDir)
|
||||
|
||||
// Filter out any types the *package* doesn't care about.
|
||||
packageContext := c.filteredBy(tgt.Filter)
|
||||
|
||||
if err := os.MkdirAll(tgtDir, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
files := map[string]*File{}
|
||||
for _, g := range tgt.Generators(packageContext) {
|
||||
// Filter out types the *generator* doesn't care about.
|
||||
genContext := packageContext.filteredBy(g.Filter)
|
||||
// Now add any extra name systems defined by this generator
|
||||
genContext = genContext.addNameSystems(g.Namers(genContext))
|
||||
|
||||
fileType := g.FileType()
|
||||
if len(fileType) == 0 {
|
||||
return fmt.Errorf("generator %q must specify a file type", g.Name())
|
||||
}
|
||||
f := files[g.Filename()]
|
||||
if f == nil {
|
||||
// This is the first generator to reference this file, so start it.
|
||||
f = &File{
|
||||
Name: g.Filename(),
|
||||
FileType: fileType,
|
||||
PackageName: tgt.Name(),
|
||||
PackagePath: tgt.Path(),
|
||||
PackageDir: tgt.Dir(),
|
||||
Header: tgt.Header(g.Filename()),
|
||||
Imports: map[string]struct{}{},
|
||||
}
|
||||
files[f.Name] = f
|
||||
} else if f.FileType != g.FileType() {
|
||||
return fmt.Errorf("file %q already has type %q, but generator %q wants to use type %q", f.Name, f.FileType, g.Name(), g.FileType())
|
||||
}
|
||||
|
||||
if vars := g.PackageVars(genContext); len(vars) > 0 {
|
||||
addIndentHeaderComment(&f.Vars, "Package-wide variables from generator %q.", g.Name())
|
||||
for _, v := range vars {
|
||||
if _, err := fmt.Fprintf(&f.Vars, "%s\n", v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if consts := g.PackageConsts(genContext); len(consts) > 0 {
|
||||
addIndentHeaderComment(&f.Consts, "Package-wide consts from generator %q.", g.Name())
|
||||
for _, v := range consts {
|
||||
if _, err := fmt.Fprintf(&f.Consts, "%s\n", v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := genContext.executeBody(&f.Body, g); err != nil {
|
||||
return err
|
||||
}
|
||||
if imports := g.Imports(genContext); len(imports) > 0 {
|
||||
for _, i := range imports {
|
||||
f.Imports[i] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var errs []error
|
||||
for _, f := range files {
|
||||
finalPath := filepath.Join(tgtDir, f.Name)
|
||||
assembler, ok := c.FileTypes[f.FileType]
|
||||
if !ok {
|
||||
return fmt.Errorf("the file type %q registered for file %q does not exist in the context", f.FileType, f.Name)
|
||||
}
|
||||
if err := assembler.AssembleFile(f, finalPath); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("errors in target %q: %w", tgt.Path(), errors.Join(errs...))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Context) executeBody(w io.Writer, generator Generator) error {
|
||||
et := NewErrorTracker(w)
|
||||
if err := generator.Init(c, et); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, t := range c.Order {
|
||||
if err := generator.GenerateType(c, t, et); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := generator.Finalize(c, et); err != nil {
|
||||
return err
|
||||
}
|
||||
return et.Error()
|
||||
}
|
||||
214
vendor/k8s.io/gengo/v2/generator/generator.go
generated
vendored
Normal file
214
vendor/k8s.io/gengo/v2/generator/generator.go
generated
vendored
Normal file
@@ -0,0 +1,214 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io"
|
||||
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/parser"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// Target describes a Go package into which code will be generated. A single
|
||||
// Target may have many Generators, each of which emits one file.
|
||||
type Target interface {
|
||||
// Name returns the package short name (as in `package foo`).
|
||||
Name() string
|
||||
// Path returns the package import path (as in `import "example.com/foo"`).
|
||||
Path() string
|
||||
// Dir returns the location of the resulting package on disk. This may be
|
||||
// the same directory as an input package (when generating code in-place)
|
||||
// or a different directory entirely.
|
||||
Dir() string
|
||||
|
||||
// Filter should return true if this package cares about this type.
|
||||
// Otherwise, this type will be omitted from the type ordering for
|
||||
// this package.
|
||||
Filter(*Context, *types.Type) bool
|
||||
|
||||
// Header should return a header for the file, including comment markers.
|
||||
// Useful for copyright notices and doc strings. Include an
|
||||
// autogeneration notice! Do not include the "package x" line.
|
||||
Header(filename string) []byte
|
||||
|
||||
// Generators returns the list of generators for this package. It is
|
||||
// allowed for more than one generator to write to the same file.
|
||||
// A Context is passed in case the list of generators depends on the
|
||||
// input types.
|
||||
Generators(*Context) []Generator
|
||||
}
|
||||
|
||||
type File struct {
|
||||
Name string
|
||||
FileType string
|
||||
PackageName string
|
||||
Header []byte
|
||||
PackagePath string
|
||||
PackageDir string
|
||||
Imports map[string]struct{}
|
||||
Vars bytes.Buffer
|
||||
Consts bytes.Buffer
|
||||
Body bytes.Buffer
|
||||
}
|
||||
|
||||
type FileType interface {
|
||||
AssembleFile(f *File, path string) error
|
||||
}
|
||||
|
||||
// Generator is the contract for anything that wants to do auto-generation.
|
||||
// It's expected that the io.Writers passed to the below functions will be
|
||||
// ErrorTrackers; this allows implementations to not check for io errors,
|
||||
// making more readable code.
|
||||
//
|
||||
// The call order for the functions that take a Context is:
|
||||
// 1. Filter() // Subsequent calls see only types that pass this.
|
||||
// 2. Namers() // Subsequent calls see the namers provided by this.
|
||||
// 3. PackageVars()
|
||||
// 4. PackageConsts()
|
||||
// 5. Init()
|
||||
// 6. GenerateType() // Called N times, once per type in the context's Order.
|
||||
// 7. Imports()
|
||||
//
|
||||
// You may have multiple generators for the same file.
|
||||
type Generator interface {
|
||||
// The name of this generator. Will be included in generated comments.
|
||||
Name() string
|
||||
|
||||
// Filter should return true if this generator cares about this type.
|
||||
// (otherwise, GenerateType will not be called.)
|
||||
//
|
||||
// Filter is called before any of the generator's other functions;
|
||||
// subsequent calls will get a context with only the types that passed
|
||||
// this filter.
|
||||
Filter(*Context, *types.Type) bool
|
||||
|
||||
// If this generator needs special namers, return them here. These will
|
||||
// override the original namers in the context if there is a collision.
|
||||
// You may return nil if you don't need special names. These names will
|
||||
// be available in the context passed to the rest of the generator's
|
||||
// functions.
|
||||
//
|
||||
// A use case for this is to return a namer that tracks imports.
|
||||
Namers(*Context) namer.NameSystems
|
||||
|
||||
// Init should write an init function, and any other content that's not
|
||||
// generated per-type. (It's not intended for generator specific
|
||||
// initialization! Do that when your Target constructs the
|
||||
// Generators.)
|
||||
Init(*Context, io.Writer) error
|
||||
|
||||
// Finalize should write finish up functions, and any other content that's not
|
||||
// generated per-type.
|
||||
Finalize(*Context, io.Writer) error
|
||||
|
||||
// PackageVars should emit an array of variable lines. They will be
|
||||
// placed in a var ( ... ) block. There's no need to include a leading
|
||||
// \t or trailing \n.
|
||||
PackageVars(*Context) []string
|
||||
|
||||
// PackageConsts should emit an array of constant lines. They will be
|
||||
// placed in a const ( ... ) block. There's no need to include a leading
|
||||
// \t or trailing \n.
|
||||
PackageConsts(*Context) []string
|
||||
|
||||
// GenerateType should emit the code for a particular type.
|
||||
GenerateType(*Context, *types.Type, io.Writer) error
|
||||
|
||||
// Imports should return a list of necessary imports. They will be
|
||||
// formatted correctly. You do not need to include quotation marks,
|
||||
// return only the package name; alternatively, you can also return
|
||||
// imports in the format `name "path/to/pkg"`. Imports will be called
|
||||
// after Init, PackageVars, PackageConsts, and GenerateType, to allow
|
||||
// you to keep track of what imports you actually need.
|
||||
Imports(*Context) []string
|
||||
|
||||
// Preferred file name of this generator, not including a path. It is
|
||||
// allowed for multiple generators to use the same filename, but it's
|
||||
// up to you to make sure they don't have colliding import names.
|
||||
// TODO: provide per-file import tracking, removing the requirement
|
||||
// that generators coordinate..
|
||||
Filename() string
|
||||
|
||||
// A registered file type in the context to generate this file with. If
|
||||
// the FileType is not found in the context, execution will stop.
|
||||
FileType() string
|
||||
}
|
||||
|
||||
// Context is global context for individual generators to consume.
|
||||
type Context struct {
|
||||
// A map from the naming system to the names for that system. E.g., you
|
||||
// might have public names and several private naming systems.
|
||||
Namers namer.NameSystems
|
||||
|
||||
// All the types, in case you want to look up something.
|
||||
Universe types.Universe
|
||||
|
||||
// All the user-specified packages. This is after recursive expansion.
|
||||
Inputs []string
|
||||
|
||||
// The canonical ordering of the types (will be filtered by both the
|
||||
// Target's and Generator's Filter methods).
|
||||
Order []*types.Type
|
||||
|
||||
// A set of types this context can process. If this is empty or nil,
|
||||
// the default "go" filetype will be provided.
|
||||
FileTypes map[string]FileType
|
||||
|
||||
// Allows generators to add packages at runtime.
|
||||
parser *parser.Parser
|
||||
}
|
||||
|
||||
// NewContext generates a context from the given parser, naming systems, and
|
||||
// the naming system you wish to construct the canonical ordering from.
|
||||
func NewContext(p *parser.Parser, nameSystems namer.NameSystems, canonicalOrderName string) (*Context, error) {
|
||||
universe, err := p.NewUniverse()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
c := &Context{
|
||||
Namers: namer.NameSystems{},
|
||||
Universe: universe,
|
||||
Inputs: p.UserRequestedPackages(),
|
||||
FileTypes: map[string]FileType{
|
||||
GoFileType: NewGoFile(),
|
||||
},
|
||||
parser: p,
|
||||
}
|
||||
|
||||
for name, systemNamer := range nameSystems {
|
||||
c.Namers[name] = systemNamer
|
||||
if name == canonicalOrderName {
|
||||
orderer := namer.Orderer{Namer: systemNamer}
|
||||
c.Order = orderer.OrderUniverse(universe)
|
||||
}
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// LoadPackages adds Go packages to the context.
|
||||
func (c *Context) LoadPackages(patterns ...string) ([]*types.Package, error) {
|
||||
return c.parser.LoadPackagesTo(&c.Universe, patterns...)
|
||||
}
|
||||
|
||||
// FindPackages expands Go package patterns into a list of package import
|
||||
// paths, akin to `go list -find`.
|
||||
func (c *Context) FindPackages(patterns ...string) ([]string, error) {
|
||||
return c.parser.FindPackages(patterns...)
|
||||
}
|
||||
61
vendor/k8s.io/gengo/v2/generator/go_generator.go
generated
vendored
Normal file
61
vendor/k8s.io/gengo/v2/generator/go_generator.go
generated
vendored
Normal file
@@ -0,0 +1,61 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"io"
|
||||
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
const (
|
||||
GoFileType = "go"
|
||||
)
|
||||
|
||||
// GoGenerator implements a do-nothing Generator for Go files. It can be
|
||||
// used as a base for custom Generators, which embed it and then define the
|
||||
// methods they need to specialize.
|
||||
type GoGenerator struct {
|
||||
// OutputFilename is used as the Generator's name, and filename.
|
||||
OutputFilename string
|
||||
|
||||
// Body, if present, will be used as the return from the "Init" method.
|
||||
// This causes it to be static content for the entire file if no other
|
||||
// generator touches the file.
|
||||
OptionalBody []byte
|
||||
}
|
||||
|
||||
func (gg GoGenerator) Name() string { return gg.OutputFilename }
|
||||
func (gg GoGenerator) Filter(*Context, *types.Type) bool { return true }
|
||||
func (gg GoGenerator) Namers(*Context) namer.NameSystems { return nil }
|
||||
func (gg GoGenerator) Imports(*Context) []string { return []string{} }
|
||||
func (gg GoGenerator) PackageVars(*Context) []string { return []string{} }
|
||||
func (gg GoGenerator) PackageConsts(*Context) []string { return []string{} }
|
||||
func (gg GoGenerator) GenerateType(*Context, *types.Type, io.Writer) error { return nil }
|
||||
func (gg GoGenerator) Filename() string { return gg.OutputFilename }
|
||||
func (gg GoGenerator) FileType() string { return GoFileType }
|
||||
func (gg GoGenerator) Finalize(*Context, io.Writer) error { return nil }
|
||||
|
||||
func (gg GoGenerator) Init(c *Context, w io.Writer) error {
|
||||
_, err := w.Write(gg.OptionalBody)
|
||||
return err
|
||||
}
|
||||
|
||||
var (
|
||||
_ = Generator(GoGenerator{})
|
||||
)
|
||||
89
vendor/k8s.io/gengo/v2/generator/import_tracker.go
generated
vendored
Normal file
89
vendor/k8s.io/gengo/v2/generator/import_tracker.go
generated
vendored
Normal file
@@ -0,0 +1,89 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"go/token"
|
||||
"strings"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// NewImportTrackerForPackage creates a new import tracker which is aware
|
||||
// of a generator's output package. The tracker will not add import lines
|
||||
// when symbols or types are added from the same package, and LocalNameOf
|
||||
// will return empty string for the output package.
|
||||
//
|
||||
// e.g.:
|
||||
//
|
||||
// tracker := NewImportTrackerForPackage("bar.com/pkg/foo")
|
||||
// tracker.AddSymbol(types.Name{"bar.com/pkg/foo.MyType"})
|
||||
// tracker.AddSymbol(types.Name{"bar.com/pkg/baz.MyType"})
|
||||
// tracker.AddSymbol(types.Name{"bar.com/pkg/baz/baz.MyType"})
|
||||
//
|
||||
// tracker.LocalNameOf("bar.com/pkg/foo") -> ""
|
||||
// tracker.LocalNameOf("bar.com/pkg/baz") -> "baz"
|
||||
// tracker.LocalNameOf("bar.com/pkg/baz/baz") -> "bazbaz"
|
||||
// tracker.ImportLines() -> {`baz "bar.com/pkg/baz"`, `bazbaz "bar.com/pkg/baz/baz"`}
|
||||
func NewImportTrackerForPackage(local string, typesToAdd ...*types.Type) *namer.DefaultImportTracker {
|
||||
tracker := namer.NewDefaultImportTracker(types.Name{Package: local})
|
||||
tracker.IsInvalidType = func(*types.Type) bool { return false }
|
||||
tracker.LocalName = func(name types.Name) string { return goTrackerLocalName(&tracker, name) }
|
||||
tracker.PrintImport = func(path, name string) string { return name + " \"" + path + "\"" }
|
||||
|
||||
tracker.AddTypes(typesToAdd...)
|
||||
return &tracker
|
||||
}
|
||||
|
||||
func NewImportTracker(typesToAdd ...*types.Type) *namer.DefaultImportTracker {
|
||||
return NewImportTrackerForPackage("", typesToAdd...)
|
||||
}
|
||||
|
||||
func goTrackerLocalName(tracker namer.ImportTracker, t types.Name) string {
|
||||
path := t.Package
|
||||
|
||||
// Using backslashes in package names causes gengo to produce Go code which
|
||||
// will not compile with the gc compiler. See the comment on GoSeperator.
|
||||
if strings.ContainsRune(path, '\\') {
|
||||
klog.Warningf("Warning: backslash used in import path '%v', this is unsupported.\n", path)
|
||||
}
|
||||
|
||||
dirs := strings.Split(path, namer.GoSeperator)
|
||||
for n := len(dirs) - 1; n >= 0; n-- {
|
||||
// follow kube convention of not having anything between directory names
|
||||
name := strings.Join(dirs[n:], "")
|
||||
name = strings.ReplaceAll(name, "_", "")
|
||||
// These characters commonly appear in import paths for go
|
||||
// packages, but aren't legal go names. So we'll sanitize.
|
||||
name = strings.ReplaceAll(name, ".", "")
|
||||
name = strings.ReplaceAll(name, "-", "")
|
||||
if _, found := tracker.PathOf(name); found {
|
||||
// This name collides with some other package
|
||||
continue
|
||||
}
|
||||
|
||||
// If the import name is a Go keyword, prefix with an underscore.
|
||||
if token.Lookup(name).IsKeyword() {
|
||||
name = "_" + name
|
||||
}
|
||||
return name
|
||||
}
|
||||
panic("can't find import for " + path)
|
||||
}
|
||||
77
vendor/k8s.io/gengo/v2/generator/simple_target.go
generated
vendored
Normal file
77
vendor/k8s.io/gengo/v2/generator/simple_target.go
generated
vendored
Normal file
@@ -0,0 +1,77 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// SimpleTarget is implements Target in terms of static configuration.
|
||||
// The package name, path, and dir are required to be non-empty.
|
||||
type SimpleTarget struct {
|
||||
// PkgName is the name of the resulting package (as in "package xxxx").
|
||||
// Required.
|
||||
PkgName string
|
||||
// PkgPath is the canonical Go import-path of the resulting package (as in
|
||||
// "import example.com/xxxx/yyyy"). Required.
|
||||
PkgPath string
|
||||
// PkgDir is the location of the resulting package on disk (which may not
|
||||
// exist yet). It may be absolute or relative to CWD. Required.
|
||||
PkgDir string
|
||||
|
||||
// HeaderComment is emitted at the top of every output file. Optional.
|
||||
HeaderComment []byte
|
||||
|
||||
// PkgDocComment is emitted after the header comment for a "doc.go" file.
|
||||
// Optional.
|
||||
PkgDocComment []byte
|
||||
|
||||
// FilterFunc will be called to implement Target.Filter. Optional.
|
||||
FilterFunc func(*Context, *types.Type) bool
|
||||
|
||||
// GeneratorsFunc will be called to implement Target.Generators. Optional.
|
||||
GeneratorsFunc func(*Context) []Generator
|
||||
}
|
||||
|
||||
func (st SimpleTarget) Name() string { return st.PkgName }
|
||||
func (st SimpleTarget) Path() string { return st.PkgPath }
|
||||
func (st SimpleTarget) Dir() string { return st.PkgDir }
|
||||
|
||||
func (st SimpleTarget) Filter(c *Context, t *types.Type) bool {
|
||||
if st.FilterFunc != nil {
|
||||
return st.FilterFunc(c, t)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (st SimpleTarget) Generators(c *Context) []Generator {
|
||||
if st.GeneratorsFunc != nil {
|
||||
return st.GeneratorsFunc(c)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (st SimpleTarget) Header(filename string) []byte {
|
||||
if filename == "doc.go" {
|
||||
return append(st.HeaderComment, st.PkgDocComment...)
|
||||
}
|
||||
return st.HeaderComment
|
||||
}
|
||||
|
||||
var (
|
||||
_ = Target(SimpleTarget{})
|
||||
)
|
||||
154
vendor/k8s.io/gengo/v2/generator/snippet_writer.go
generated
vendored
Normal file
154
vendor/k8s.io/gengo/v2/generator/snippet_writer.go
generated
vendored
Normal file
@@ -0,0 +1,154 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generator
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// SnippetWriter is an attempt to make the template library usable.
|
||||
// Methods are chainable, and you don't have to check Error() until you're all
|
||||
// done.
|
||||
type SnippetWriter struct {
|
||||
w io.Writer
|
||||
context *Context
|
||||
// Left & right delimiters. text/template defaults to "{{" and "}}"
|
||||
// which is totally unusable for go code based templates.
|
||||
left, right string
|
||||
funcMap template.FuncMap
|
||||
err error
|
||||
}
|
||||
|
||||
// w is the destination; left and right are the delimiters; @ and $ are both
|
||||
// reasonable choices.
|
||||
//
|
||||
// c is used to make a function for every naming system, to which you can pass
|
||||
// a type and get the corresponding name.
|
||||
func NewSnippetWriter(w io.Writer, c *Context, left, right string) *SnippetWriter {
|
||||
sw := &SnippetWriter{
|
||||
w: w,
|
||||
context: c,
|
||||
left: left,
|
||||
right: right,
|
||||
funcMap: template.FuncMap{},
|
||||
}
|
||||
for name, namer := range c.Namers {
|
||||
sw.funcMap[name] = namer.Name
|
||||
}
|
||||
return sw
|
||||
}
|
||||
|
||||
// Do parses format and runs args through it. You can have arbitrary logic in
|
||||
// the format (see the text/template documentation), but consider running many
|
||||
// short templates with ordinary go logic in between--this may be more
|
||||
// readable. Do is chainable. Any error causes every other call to do to be
|
||||
// ignored, and the error will be returned by Error(). So you can check it just
|
||||
// once, at the end of your function.
|
||||
//
|
||||
// 'args' can be quite literally anything; read the text/template documentation
|
||||
// for details. Maps and structs work particularly nicely. Conveniently, the
|
||||
// types package is designed to have structs that are easily referencable from
|
||||
// the template language.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// sw := generator.NewSnippetWriter(outBuffer, context, "$", "$")
|
||||
// sw.Do(`The public type name is: $.type|public$`, map[string]interface{}{"type": t})
|
||||
// return sw.Error()
|
||||
//
|
||||
// Where:
|
||||
// - "$" starts a template directive
|
||||
// - "." references the entire thing passed as args
|
||||
// - "type" therefore sees a map and looks up the key "type"
|
||||
// - "|" means "pass the thing on the left to the thing on the right"
|
||||
// - "public" is the name of a naming system, so the SnippetWriter has given
|
||||
// the template a function called "public" that takes a *types.Type and
|
||||
// returns the naming system's name. E.g., if the type is "string" this might
|
||||
// return "String".
|
||||
// - the second "$" ends the template directive.
|
||||
//
|
||||
// The map is actually not necessary. The below does the same thing:
|
||||
//
|
||||
// sw.Do(`The public type name is: $.|public$`, t)
|
||||
//
|
||||
// You may or may not find it more readable to use the map with a descriptive
|
||||
// key, but if you want to pass more than one arg, the map or a custom struct
|
||||
// becomes a requirement. You can do arbitrary logic inside these templates,
|
||||
// but you should consider doing the logic in go and stitching them together
|
||||
// for the sake of your readers.
|
||||
//
|
||||
// TODO: Change Do() to optionally take a list of pairs of parameters (key, value)
|
||||
// and have it construct a combined map with that and args.
|
||||
func (s *SnippetWriter) Do(format string, args interface{}) *SnippetWriter {
|
||||
if s.err != nil {
|
||||
return s
|
||||
}
|
||||
// Name the template by source file:line so it can be found when
|
||||
// there's an error.
|
||||
_, file, line, _ := runtime.Caller(1)
|
||||
tmpl, err := template.
|
||||
New(fmt.Sprintf("%s:%d", file, line)).
|
||||
Delims(s.left, s.right).
|
||||
Funcs(s.funcMap).
|
||||
Parse(format)
|
||||
if err != nil {
|
||||
s.err = err
|
||||
return s
|
||||
}
|
||||
err = tmpl.Execute(s.w, args)
|
||||
if err != nil {
|
||||
s.err = err
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Args exists to make it convenient to construct arguments for
|
||||
// SnippetWriter.Do.
|
||||
type Args map[interface{}]interface{}
|
||||
|
||||
// With makes a copy of a and adds the given key, value pair.
|
||||
func (a Args) With(key, value interface{}) Args {
|
||||
a2 := Args{key: value}
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
// WithArgs makes a copy of a and adds the given arguments.
|
||||
func (a Args) WithArgs(rhs Args) Args {
|
||||
a2 := Args{}
|
||||
for k, v := range rhs {
|
||||
a2[k] = v
|
||||
}
|
||||
for k, v := range a {
|
||||
a2[k] = v
|
||||
}
|
||||
return a2
|
||||
}
|
||||
|
||||
func (s *SnippetWriter) Out() io.Writer {
|
||||
return s.w
|
||||
}
|
||||
|
||||
// Error returns any encountered error.
|
||||
func (s *SnippetWriter) Error() error {
|
||||
return s.err
|
||||
}
|
||||
31
vendor/k8s.io/gengo/v2/namer/doc.go
generated
vendored
Normal file
31
vendor/k8s.io/gengo/v2/namer/doc.go
generated
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package namer has support for making different type naming systems.
|
||||
//
|
||||
// This is because sometimes you want to refer to the literal type, sometimes
|
||||
// you want to make a name for the thing you're generating, and you want to
|
||||
// make the name based on the type. For example, if you have `type foo string`,
|
||||
// you want to be able to generate something like `func FooPrinter(f *foo) {
|
||||
// Print(string(*f)) }`; that is, you want to refer to a public name, a literal
|
||||
// name, and the underlying literal name.
|
||||
//
|
||||
// This package supports the idea of a "Namer" and a set of "NameSystems" to
|
||||
// support these use cases.
|
||||
//
|
||||
// Additionally, a "RawNamer" can optionally keep track of what needs to be
|
||||
// imported.
|
||||
package namer // import "k8s.io/gengo/v2/namer"
|
||||
121
vendor/k8s.io/gengo/v2/namer/import_tracker.go
generated
vendored
Normal file
121
vendor/k8s.io/gengo/v2/namer/import_tracker.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namer
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// ImportTracker may be passed to a namer.RawNamer, to track the imports needed
|
||||
// for the types it names.
|
||||
//
|
||||
// TODO: pay attention to the package name (instead of renaming every package).
|
||||
type DefaultImportTracker struct {
|
||||
pathToName map[string]string
|
||||
// forbidden names are in here. (e.g. "go" is a directory in which
|
||||
// there is code, but "go" is not a legal name for a package, so we put
|
||||
// it here to prevent us from naming any package "go")
|
||||
nameToPath map[string]string
|
||||
local types.Name
|
||||
|
||||
// Returns true if a given types is an invalid type and should be ignored.
|
||||
IsInvalidType func(*types.Type) bool
|
||||
// Returns the final local name for the given name
|
||||
LocalName func(types.Name) string
|
||||
// Returns the "import" line for a given (path, name).
|
||||
PrintImport func(string, string) string
|
||||
}
|
||||
|
||||
func NewDefaultImportTracker(local types.Name) DefaultImportTracker {
|
||||
return DefaultImportTracker{
|
||||
pathToName: map[string]string{},
|
||||
nameToPath: map[string]string{},
|
||||
local: local,
|
||||
}
|
||||
}
|
||||
|
||||
func (tracker *DefaultImportTracker) AddTypes(types ...*types.Type) {
|
||||
for _, t := range types {
|
||||
tracker.AddType(t)
|
||||
}
|
||||
}
|
||||
func (tracker *DefaultImportTracker) AddSymbol(symbol types.Name) {
|
||||
if tracker.local.Package == symbol.Package {
|
||||
return
|
||||
}
|
||||
|
||||
if len(symbol.Package) == 0 {
|
||||
return
|
||||
}
|
||||
path := symbol.Path
|
||||
if len(path) == 0 {
|
||||
path = symbol.Package
|
||||
}
|
||||
if _, ok := tracker.pathToName[path]; ok {
|
||||
return
|
||||
}
|
||||
|
||||
name := tracker.LocalName(symbol)
|
||||
tracker.nameToPath[name] = path
|
||||
tracker.pathToName[path] = name
|
||||
}
|
||||
|
||||
func (tracker *DefaultImportTracker) AddType(t *types.Type) {
|
||||
if tracker.local.Package == t.Name.Package {
|
||||
return
|
||||
}
|
||||
|
||||
if tracker.IsInvalidType(t) {
|
||||
if t.Kind == types.Builtin {
|
||||
return
|
||||
}
|
||||
if _, ok := tracker.nameToPath[t.Name.Package]; !ok {
|
||||
tracker.nameToPath[t.Name.Package] = ""
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
tracker.AddSymbol(t.Name)
|
||||
}
|
||||
|
||||
func (tracker *DefaultImportTracker) ImportLines() []string {
|
||||
importPaths := []string{}
|
||||
for path := range tracker.pathToName {
|
||||
importPaths = append(importPaths, path)
|
||||
}
|
||||
sort.Strings(importPaths)
|
||||
out := []string{}
|
||||
for _, path := range importPaths {
|
||||
out = append(out, tracker.PrintImport(path, tracker.pathToName[path]))
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
// LocalNameOf returns the name you would use to refer to the package at the
|
||||
// specified path within the body of a file.
|
||||
func (tracker *DefaultImportTracker) LocalNameOf(path string) string {
|
||||
return tracker.pathToName[path]
|
||||
}
|
||||
|
||||
// PathOf returns the path that a given localName is referring to within the
|
||||
// body of a file.
|
||||
func (tracker *DefaultImportTracker) PathOf(localName string) (string, bool) {
|
||||
name, ok := tracker.nameToPath[localName]
|
||||
return name, ok
|
||||
}
|
||||
395
vendor/k8s.io/gengo/v2/namer/namer.go
generated
vendored
Normal file
395
vendor/k8s.io/gengo/v2/namer/namer.go
generated
vendored
Normal file
@@ -0,0 +1,395 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
const (
|
||||
// GoSeperator is used to split go import paths.
|
||||
// Forward slash is used instead of filepath.Seperator because it is the
|
||||
// only universally-accepted path delimiter and the only delimiter not
|
||||
// potentially forbidden by Go compilers. (In particular gc does not allow
|
||||
// the use of backslashes in import paths.)
|
||||
// See https://golang.org/ref/spec#Import_declarations.
|
||||
// See also https://github.com/kubernetes/gengo/issues/83#issuecomment-367040772.
|
||||
GoSeperator = "/"
|
||||
)
|
||||
|
||||
// Returns whether a name is a private Go name.
|
||||
func IsPrivateGoName(name string) bool {
|
||||
return len(name) == 0 || strings.ToLower(name[:1]) == name[:1]
|
||||
}
|
||||
|
||||
// NewPublicNamer is a helper function that returns a namer that makes
|
||||
// CamelCase names. See the NameStrategy struct for an explanation of the
|
||||
// arguments to this constructor.
|
||||
func NewPublicNamer(prependPackageNames int, ignoreWords ...string) *NameStrategy {
|
||||
n := &NameStrategy{
|
||||
Join: Joiner(IC, IC),
|
||||
IgnoreWords: map[string]bool{},
|
||||
PrependPackageNames: prependPackageNames,
|
||||
}
|
||||
for _, w := range ignoreWords {
|
||||
n.IgnoreWords[w] = true
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// NewPrivateNamer is a helper function that returns a namer that makes
|
||||
// camelCase names. See the NameStrategy struct for an explanation of the
|
||||
// arguments to this constructor.
|
||||
func NewPrivateNamer(prependPackageNames int, ignoreWords ...string) *NameStrategy {
|
||||
n := &NameStrategy{
|
||||
Join: Joiner(IL, IC),
|
||||
IgnoreWords: map[string]bool{},
|
||||
PrependPackageNames: prependPackageNames,
|
||||
}
|
||||
for _, w := range ignoreWords {
|
||||
n.IgnoreWords[w] = true
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
// NewRawNamer will return a Namer that makes a name by which you would
|
||||
// directly refer to a type, optionally keeping track of the import paths
|
||||
// necessary to reference the names it provides. Tracker may be nil.
|
||||
// The 'pkg' is the full package name, in which the Namer is used - all
|
||||
// types from that package will be referenced by just type name without
|
||||
// referencing the package.
|
||||
//
|
||||
// For example, if the type is map[string]int, a raw namer will literally
|
||||
// return "map[string]int".
|
||||
//
|
||||
// Or if the type, in package foo, is "type Bar struct { ... }", then the raw
|
||||
// namer will return "foo.Bar" as the name of the type, and if 'tracker' was
|
||||
// not nil, will record that package foo needs to be imported.
|
||||
func NewRawNamer(pkg string, tracker ImportTracker) *rawNamer {
|
||||
return &rawNamer{pkg: pkg, tracker: tracker}
|
||||
}
|
||||
|
||||
// Names is a map from Type to name, as defined by some Namer.
|
||||
type Names map[*types.Type]string
|
||||
|
||||
// Namer takes a type, and assigns a name.
|
||||
//
|
||||
// The purpose of this complexity is so that you can assign coherent
|
||||
// side-by-side systems of names for the types. For example, you might want a
|
||||
// public interface, a private implementation struct, and also to reference
|
||||
// literally the type name.
|
||||
//
|
||||
// Note that it is safe to call your own Name() function recursively to find
|
||||
// the names of keys, elements, etc. This is because anonymous types can't have
|
||||
// cycles in their names, and named types don't require the sort of recursion
|
||||
// that would be problematic.
|
||||
type Namer interface {
|
||||
Name(*types.Type) string
|
||||
}
|
||||
|
||||
// NameSystems is a map of a system name to a namer for that system.
|
||||
type NameSystems map[string]Namer
|
||||
|
||||
// NameStrategy is a general Namer. The easiest way to use it is to copy the
|
||||
// Public/PrivateNamer variables, and modify the members you wish to change.
|
||||
//
|
||||
// The Name method produces a name for the given type, of the forms:
|
||||
// Anonymous types: <Prefix><Type description><Suffix>
|
||||
// Named types: <Prefix><Optional Prepended Package name(s)><Original name><Suffix>
|
||||
//
|
||||
// In all cases, every part of the name is run through the capitalization
|
||||
// functions.
|
||||
//
|
||||
// The IgnoreWords map can be set if you have directory names that are
|
||||
// semantically meaningless for naming purposes, e.g. "proto".
|
||||
//
|
||||
// Prefix and Suffix can be used to disambiguate parallel systems of type
|
||||
// names. For example, if you want to generate an interface and an
|
||||
// implementation, you might want to suffix one with "Interface" and the other
|
||||
// with "Implementation". Another common use-- if you want to generate private
|
||||
// types, and one of your source types could be "string", you can't use the
|
||||
// default lowercase private namer. You'll have to add a suffix or prefix.
|
||||
type NameStrategy struct {
|
||||
Prefix, Suffix string
|
||||
Join func(pre string, parts []string, post string) string
|
||||
|
||||
// Add non-meaningful package directory names here (e.g. "proto") and
|
||||
// they will be ignored.
|
||||
IgnoreWords map[string]bool
|
||||
|
||||
// If > 0, prepend exactly that many package directory names (or as
|
||||
// many as there are). Package names listed in "IgnoreWords" will be
|
||||
// ignored.
|
||||
//
|
||||
// For example, if Ignore words lists "proto" and type Foo is in
|
||||
// pkg/server/frobbing/proto, then a value of 1 will give a type name
|
||||
// of FrobbingFoo, 2 gives ServerFrobbingFoo, etc.
|
||||
PrependPackageNames int
|
||||
|
||||
// A cache of names thus far assigned by this namer.
|
||||
Names
|
||||
}
|
||||
|
||||
// IC ensures the first character is uppercase.
|
||||
func IC(in string) string {
|
||||
if in == "" {
|
||||
return in
|
||||
}
|
||||
return strings.ToUpper(in[:1]) + in[1:]
|
||||
}
|
||||
|
||||
// IL ensures the first character is lowercase.
|
||||
func IL(in string) string {
|
||||
if in == "" {
|
||||
return in
|
||||
}
|
||||
return strings.ToLower(in[:1]) + in[1:]
|
||||
}
|
||||
|
||||
// Joiner lets you specify functions that preprocess the various components of
|
||||
// a name before joining them. You can construct e.g. camelCase or CamelCase or
|
||||
// any other way of joining words. (See the IC and IL convenience functions.)
|
||||
func Joiner(first, others func(string) string) func(pre string, in []string, post string) string {
|
||||
return func(pre string, in []string, post string) string {
|
||||
tmp := []string{others(pre)}
|
||||
for i := range in {
|
||||
tmp = append(tmp, others(in[i]))
|
||||
}
|
||||
tmp = append(tmp, others(post))
|
||||
return first(strings.Join(tmp, ""))
|
||||
}
|
||||
}
|
||||
|
||||
func (ns *NameStrategy) removePrefixAndSuffix(s string) string {
|
||||
// The join function may have changed capitalization.
|
||||
lowerIn := strings.ToLower(s)
|
||||
lowerP := strings.ToLower(ns.Prefix)
|
||||
lowerS := strings.ToLower(ns.Suffix)
|
||||
b, e := 0, len(s)
|
||||
if strings.HasPrefix(lowerIn, lowerP) {
|
||||
b = len(ns.Prefix)
|
||||
}
|
||||
if strings.HasSuffix(lowerIn, lowerS) {
|
||||
e -= len(ns.Suffix)
|
||||
}
|
||||
return s[b:e]
|
||||
}
|
||||
|
||||
var (
|
||||
importPathNameSanitizer = strings.NewReplacer("-", "_", ".", "")
|
||||
)
|
||||
|
||||
// filters out unwanted directory names and sanitizes remaining names.
|
||||
func (ns *NameStrategy) filterDirs(path string) []string {
|
||||
allDirs := strings.Split(path, GoSeperator)
|
||||
dirs := make([]string, 0, len(allDirs))
|
||||
for _, p := range allDirs {
|
||||
if ns.IgnoreWords == nil || !ns.IgnoreWords[p] {
|
||||
dirs = append(dirs, importPathNameSanitizer.Replace(p))
|
||||
}
|
||||
}
|
||||
return dirs
|
||||
}
|
||||
|
||||
// See the comment on NameStrategy.
|
||||
func (ns *NameStrategy) Name(t *types.Type) string {
|
||||
if ns.Names == nil {
|
||||
ns.Names = Names{}
|
||||
}
|
||||
if s, ok := ns.Names[t]; ok {
|
||||
return s
|
||||
}
|
||||
|
||||
if t.Name.Package != "" {
|
||||
dirs := append(ns.filterDirs(t.Name.Package), t.Name.Name)
|
||||
i := ns.PrependPackageNames + 1
|
||||
dn := len(dirs)
|
||||
if i > dn {
|
||||
i = dn
|
||||
}
|
||||
name := ns.Join(ns.Prefix, dirs[dn-i:], ns.Suffix)
|
||||
ns.Names[t] = name
|
||||
return name
|
||||
}
|
||||
|
||||
// Only anonymous types remain.
|
||||
var name string
|
||||
switch t.Kind {
|
||||
case types.Builtin:
|
||||
name = ns.Join(ns.Prefix, []string{t.Name.Name}, ns.Suffix)
|
||||
case types.Map:
|
||||
name = ns.Join(ns.Prefix, []string{
|
||||
"Map",
|
||||
ns.removePrefixAndSuffix(ns.Name(t.Key)),
|
||||
"To",
|
||||
ns.removePrefixAndSuffix(ns.Name(t.Elem)),
|
||||
}, ns.Suffix)
|
||||
case types.Slice:
|
||||
name = ns.Join(ns.Prefix, []string{
|
||||
"Slice",
|
||||
ns.removePrefixAndSuffix(ns.Name(t.Elem)),
|
||||
}, ns.Suffix)
|
||||
case types.Array:
|
||||
name = ns.Join(ns.Prefix, []string{
|
||||
"Array",
|
||||
ns.removePrefixAndSuffix(fmt.Sprintf("%d", t.Len)),
|
||||
ns.removePrefixAndSuffix(ns.Name(t.Elem)),
|
||||
}, ns.Suffix)
|
||||
case types.Pointer:
|
||||
name = ns.Join(ns.Prefix, []string{
|
||||
"Pointer",
|
||||
ns.removePrefixAndSuffix(ns.Name(t.Elem)),
|
||||
}, ns.Suffix)
|
||||
case types.Struct:
|
||||
names := []string{"Struct"}
|
||||
for _, m := range t.Members {
|
||||
names = append(names, ns.removePrefixAndSuffix(ns.Name(m.Type)))
|
||||
}
|
||||
name = ns.Join(ns.Prefix, names, ns.Suffix)
|
||||
case types.Chan:
|
||||
name = ns.Join(ns.Prefix, []string{
|
||||
"Chan",
|
||||
ns.removePrefixAndSuffix(ns.Name(t.Elem)),
|
||||
}, ns.Suffix)
|
||||
case types.Interface:
|
||||
// TODO: add to name test
|
||||
names := []string{"Interface"}
|
||||
for _, m := range t.Methods {
|
||||
// TODO: include function signature
|
||||
names = append(names, m.Name.Name)
|
||||
}
|
||||
name = ns.Join(ns.Prefix, names, ns.Suffix)
|
||||
case types.Func:
|
||||
// TODO: add to name test
|
||||
parts := []string{"Func"}
|
||||
for _, pt := range t.Signature.Parameters {
|
||||
parts = append(parts, ns.removePrefixAndSuffix(ns.Name(pt)))
|
||||
}
|
||||
parts = append(parts, "Returns")
|
||||
for _, rt := range t.Signature.Results {
|
||||
parts = append(parts, ns.removePrefixAndSuffix(ns.Name(rt)))
|
||||
}
|
||||
name = ns.Join(ns.Prefix, parts, ns.Suffix)
|
||||
default:
|
||||
name = "unnameable_" + string(t.Kind)
|
||||
}
|
||||
ns.Names[t] = name
|
||||
return name
|
||||
}
|
||||
|
||||
// ImportTracker allows a raw namer to keep track of the packages needed for
|
||||
// import. You can implement yourself or use the one in the generation package.
|
||||
type ImportTracker interface {
|
||||
AddType(*types.Type)
|
||||
AddSymbol(types.Name)
|
||||
LocalNameOf(packagePath string) string
|
||||
PathOf(localName string) (string, bool)
|
||||
ImportLines() []string
|
||||
}
|
||||
|
||||
type rawNamer struct {
|
||||
pkg string
|
||||
tracker ImportTracker
|
||||
Names
|
||||
}
|
||||
|
||||
// Name makes a name the way you'd write it to literally refer to type t,
|
||||
// making ordinary assumptions about how you've imported t's package (or using
|
||||
// r.tracker to specifically track the package imports).
|
||||
func (r *rawNamer) Name(t *types.Type) string {
|
||||
if r.Names == nil {
|
||||
r.Names = Names{}
|
||||
}
|
||||
if name, ok := r.Names[t]; ok {
|
||||
return name
|
||||
}
|
||||
if t.Name.Package != "" {
|
||||
var name string
|
||||
if r.tracker != nil {
|
||||
r.tracker.AddType(t)
|
||||
if t.Name.Package == r.pkg {
|
||||
name = t.Name.Name
|
||||
} else {
|
||||
name = r.tracker.LocalNameOf(t.Name.Package) + "." + t.Name.Name
|
||||
}
|
||||
} else {
|
||||
if t.Name.Package == r.pkg {
|
||||
name = t.Name.Name
|
||||
} else {
|
||||
name = filepath.Base(t.Name.Package) + "." + t.Name.Name
|
||||
}
|
||||
}
|
||||
r.Names[t] = name
|
||||
return name
|
||||
}
|
||||
var name string
|
||||
switch t.Kind {
|
||||
case types.Builtin:
|
||||
name = t.Name.Name
|
||||
case types.Map:
|
||||
name = "map[" + r.Name(t.Key) + "]" + r.Name(t.Elem)
|
||||
case types.Slice:
|
||||
name = "[]" + r.Name(t.Elem)
|
||||
case types.Array:
|
||||
l := strconv.Itoa(int(t.Len))
|
||||
name = "[" + l + "]" + r.Name(t.Elem)
|
||||
case types.Pointer:
|
||||
name = "*" + r.Name(t.Elem)
|
||||
case types.Struct:
|
||||
elems := []string{}
|
||||
for _, m := range t.Members {
|
||||
elems = append(elems, m.Name+" "+r.Name(m.Type))
|
||||
}
|
||||
name = "struct{" + strings.Join(elems, "; ") + "}"
|
||||
case types.Chan:
|
||||
// TODO: include directionality
|
||||
name = "chan " + r.Name(t.Elem)
|
||||
case types.Interface:
|
||||
// TODO: add to name test
|
||||
elems := []string{}
|
||||
for _, m := range t.Methods {
|
||||
// TODO: include function signature
|
||||
elems = append(elems, m.Name.Name)
|
||||
}
|
||||
name = "interface{" + strings.Join(elems, "; ") + "}"
|
||||
case types.Func:
|
||||
// TODO: add to name test
|
||||
params := []string{}
|
||||
for _, pt := range t.Signature.Parameters {
|
||||
params = append(params, r.Name(pt))
|
||||
}
|
||||
results := []string{}
|
||||
for _, rt := range t.Signature.Results {
|
||||
results = append(results, r.Name(rt))
|
||||
}
|
||||
name = "func(" + strings.Join(params, ",") + ")"
|
||||
if len(results) == 1 {
|
||||
name += " " + results[0]
|
||||
} else if len(results) > 1 {
|
||||
name += " (" + strings.Join(results, ",") + ")"
|
||||
}
|
||||
default:
|
||||
name = "unnameable_" + string(t.Kind)
|
||||
}
|
||||
r.Names[t] = name
|
||||
return name
|
||||
}
|
||||
72
vendor/k8s.io/gengo/v2/namer/order.go
generated
vendored
Normal file
72
vendor/k8s.io/gengo/v2/namer/order.go
generated
vendored
Normal file
@@ -0,0 +1,72 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namer
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// Orderer produces an ordering of types given a Namer.
|
||||
type Orderer struct {
|
||||
Namer
|
||||
}
|
||||
|
||||
// OrderUniverse assigns a name to every type in the Universe, including Types,
|
||||
// Functions and Variables, and returns a list sorted by those names.
|
||||
func (o *Orderer) OrderUniverse(u types.Universe) []*types.Type {
|
||||
list := tList{
|
||||
namer: o.Namer,
|
||||
}
|
||||
for _, p := range u {
|
||||
for _, t := range p.Types {
|
||||
list.types = append(list.types, t)
|
||||
}
|
||||
for _, f := range p.Functions {
|
||||
list.types = append(list.types, f)
|
||||
}
|
||||
for _, v := range p.Variables {
|
||||
list.types = append(list.types, v)
|
||||
}
|
||||
for _, v := range p.Constants {
|
||||
list.types = append(list.types, v)
|
||||
}
|
||||
}
|
||||
sort.Sort(list)
|
||||
return list.types
|
||||
}
|
||||
|
||||
// OrderTypes assigns a name to every type, and returns a list sorted by those
|
||||
// names.
|
||||
func (o *Orderer) OrderTypes(typeList []*types.Type) []*types.Type {
|
||||
list := tList{
|
||||
namer: o.Namer,
|
||||
types: typeList,
|
||||
}
|
||||
sort.Sort(list)
|
||||
return list.types
|
||||
}
|
||||
|
||||
type tList struct {
|
||||
namer Namer
|
||||
types []*types.Type
|
||||
}
|
||||
|
||||
func (t tList) Len() int { return len(t.types) }
|
||||
func (t tList) Less(i, j int) bool { return t.namer.Name(t.types[i]) < t.namer.Name(t.types[j]) }
|
||||
func (t tList) Swap(i, j int) { t.types[i], t.types[j] = t.types[j], t.types[i] }
|
||||
120
vendor/k8s.io/gengo/v2/namer/plural_namer.go
generated
vendored
Normal file
120
vendor/k8s.io/gengo/v2/namer/plural_namer.go
generated
vendored
Normal file
@@ -0,0 +1,120 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package namer
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
var consonants = "bcdfghjklmnpqrstvwxyz"
|
||||
|
||||
type pluralNamer struct {
|
||||
// key is the case-sensitive type name, value is the case-insensitive
|
||||
// intended output.
|
||||
exceptions map[string]string
|
||||
finalize func(string) string
|
||||
}
|
||||
|
||||
// NewPublicPluralNamer returns a namer that returns the plural form of the input
|
||||
// type's name, starting with a uppercase letter.
|
||||
func NewPublicPluralNamer(exceptions map[string]string) *pluralNamer {
|
||||
return &pluralNamer{exceptions, IC}
|
||||
}
|
||||
|
||||
// NewPrivatePluralNamer returns a namer that returns the plural form of the input
|
||||
// type's name, starting with a lowercase letter.
|
||||
func NewPrivatePluralNamer(exceptions map[string]string) *pluralNamer {
|
||||
return &pluralNamer{exceptions, IL}
|
||||
}
|
||||
|
||||
// NewAllLowercasePluralNamer returns a namer that returns the plural form of the input
|
||||
// type's name, with all letters in lowercase.
|
||||
func NewAllLowercasePluralNamer(exceptions map[string]string) *pluralNamer {
|
||||
return &pluralNamer{exceptions, strings.ToLower}
|
||||
}
|
||||
|
||||
// Name returns the plural form of the type's name. If the type's name is found
|
||||
// in the exceptions map, the map value is returned.
|
||||
func (r *pluralNamer) Name(t *types.Type) string {
|
||||
singular := t.Name.Name
|
||||
var plural string
|
||||
var ok bool
|
||||
if plural, ok = r.exceptions[singular]; ok {
|
||||
return r.finalize(plural)
|
||||
}
|
||||
if len(singular) < 2 {
|
||||
return r.finalize(singular)
|
||||
}
|
||||
|
||||
switch rune(singular[len(singular)-1]) {
|
||||
case 's', 'x', 'z':
|
||||
plural = esPlural(singular)
|
||||
case 'y':
|
||||
sl := rune(singular[len(singular)-2])
|
||||
if isConsonant(sl) {
|
||||
plural = iesPlural(singular)
|
||||
} else {
|
||||
plural = sPlural(singular)
|
||||
}
|
||||
case 'h':
|
||||
sl := rune(singular[len(singular)-2])
|
||||
if sl == 'c' || sl == 's' {
|
||||
plural = esPlural(singular)
|
||||
} else {
|
||||
plural = sPlural(singular)
|
||||
}
|
||||
case 'e':
|
||||
sl := rune(singular[len(singular)-2])
|
||||
if sl == 'f' {
|
||||
plural = vesPlural(singular[:len(singular)-1])
|
||||
} else {
|
||||
plural = sPlural(singular)
|
||||
}
|
||||
case 'f':
|
||||
plural = vesPlural(singular)
|
||||
default:
|
||||
plural = sPlural(singular)
|
||||
}
|
||||
return r.finalize(plural)
|
||||
}
|
||||
|
||||
func iesPlural(singular string) string {
|
||||
return singular[:len(singular)-1] + "ies"
|
||||
}
|
||||
|
||||
func vesPlural(singular string) string {
|
||||
return singular[:len(singular)-1] + "ves"
|
||||
}
|
||||
|
||||
func esPlural(singular string) string {
|
||||
return singular + "es"
|
||||
}
|
||||
|
||||
func sPlural(singular string) string {
|
||||
return singular + "s"
|
||||
}
|
||||
|
||||
func isConsonant(char rune) bool {
|
||||
for _, c := range consonants {
|
||||
if char == c {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
19
vendor/k8s.io/gengo/v2/parser/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/gengo/v2/parser/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Package parser provides code to parse go files, type-check them, extract the
|
||||
// types.
|
||||
package parser // import "k8s.io/gengo/v2/parser"
|
||||
821
vendor/k8s.io/gengo/v2/parser/parse.go
generated
vendored
Normal file
821
vendor/k8s.io/gengo/v2/parser/parse.go
generated
vendored
Normal file
@@ -0,0 +1,821 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package parser

import (
	"errors"
	"fmt"
	"go/ast"
	"go/constant"
	"go/token"
	gotypes "go/types"
	"path/filepath"
	"sort"
	"strings"
	"time"

	"golang.org/x/tools/go/packages"
	"k8s.io/gengo/v2/types"
	"k8s.io/klog/v2"
)

// Parser lets you add all the go files in all the packages that you care
// about, then constructs the type source data.
type Parser struct {
	// Map of package paths to definitions. These keys should be canonical
	// Go import paths (example.com/foo/bar) and not local paths (./foo/bar).
	goPkgs map[string]*packages.Package

	// Keep track of which packages were directly requested (as opposed to
	// those which are transitively loaded).
	userRequested map[string]bool

	// Keep track of which packages have already been scanned for types.
	fullyProcessed map[string]bool

	// Build tags to set when loading packages.
	buildTags []string

	// Tracks accumulated parsed files, so we can do position lookups later.
	fset *token.FileSet

	// All comments from everywhere in every parsed file. This map is keyed by
	// the file-line on which the comment block ends, which makes it easy to
	// look up comments which immediately precede a given object (e.g. a type or
	// function definition), which is what we almost always want. We need this
	// because Go's own ast package does a very poor job of handling comments.
	endLineToCommentGroup map[fileLine]*ast.CommentGroup
}
|
||||
// key type for finding comments.
|
||||
type fileLine struct {
|
||||
file string
|
||||
line int
|
||||
}
|
||||
|
||||
// New constructs a new Parser.
|
||||
func New() *Parser {
|
||||
return NewWithOptions(Options{})
|
||||
}
|
||||
|
||||
func NewWithOptions(opts Options) *Parser {
|
||||
return &Parser{
|
||||
goPkgs: map[string]*packages.Package{},
|
||||
userRequested: map[string]bool{},
|
||||
fullyProcessed: map[string]bool{},
|
||||
fset: token.NewFileSet(),
|
||||
endLineToCommentGroup: map[fileLine]*ast.CommentGroup{},
|
||||
buildTags: opts.BuildTags,
|
||||
}
|
||||
}
|
||||
|
||||
// Options holds optional settings for the Parser.
|
||||
type Options struct {
|
||||
// BuildTags is a list of optional tags to be specified when loading
|
||||
// packages.
|
||||
BuildTags []string
|
||||
}
|
||||
|
||||
// FindPackages expands the provided patterns into a list of Go import-paths,
|
||||
// much like `go list -find`.
|
||||
func (p *Parser) FindPackages(patterns ...string) ([]string, error) {
|
||||
return p.findPackages(nil, patterns...)
|
||||
}
|
||||
|
||||
// baseCfg is an optional (may be nil) config which might be injected by tests.
|
||||
func (p *Parser) findPackages(baseCfg *packages.Config, patterns ...string) ([]string, error) {
|
||||
toFind := make([]string, 0, len(patterns))
|
||||
results := make([]string, 0, len(patterns))
|
||||
for _, pat := range patterns {
|
||||
if pkg := p.goPkgs[pat]; pkg != nil {
|
||||
results = append(results, pkg.PkgPath)
|
||||
} else {
|
||||
toFind = append(toFind, pat)
|
||||
}
|
||||
}
|
||||
if len(toFind) == 0 {
|
||||
return results, nil
|
||||
}
|
||||
|
||||
cfg := packages.Config{
|
||||
Mode: packages.NeedName | packages.NeedFiles,
|
||||
BuildFlags: []string{"-tags", strings.Join(p.buildTags, ",")},
|
||||
Tests: false,
|
||||
}
|
||||
if baseCfg != nil {
|
||||
// This is to support tests, e.g. to inject a fake GOPATH or CWD.
|
||||
cfg.Dir = baseCfg.Dir
|
||||
cfg.Env = baseCfg.Env
|
||||
}
|
||||
|
||||
pkgs, err := packages.Load(&cfg, toFind...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading packages: %w", err)
|
||||
}
|
||||
var allErrs []error
|
||||
for _, pkg := range pkgs {
|
||||
results = append(results, pkg.PkgPath)
|
||||
|
||||
// pkg.Errors is not a slice of `error`, but concrete types. We have
|
||||
// to iteratively convert each one into `error`.
|
||||
var errs []error
|
||||
for _, e := range pkg.Errors {
|
||||
errs = append(errs, e)
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
allErrs = append(allErrs, fmt.Errorf("error(s) in %q:\n%w", pkg.PkgPath, errors.Join(errs...)))
|
||||
}
|
||||
}
|
||||
if len(allErrs) != 0 {
|
||||
return nil, errors.Join(allErrs...)
|
||||
}
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// LoadPackages loads and parses the specified Go packages. Specifically
|
||||
// named packages (without a trailing "/...") which do not exist or have no Go
|
||||
// files are an error.
|
||||
func (p *Parser) LoadPackages(patterns ...string) error {
|
||||
_, err := p.loadPackages(patterns...)
|
||||
return err
|
||||
}
|
||||
|
||||
// LoadPackagesWithConfigForTesting loads and parses the specified Go packages with the
|
||||
// specified packages.Config as a starting point. This is for testing, and
|
||||
// only the .Dir and .Env fields of the Config will be considered.
|
||||
func (p *Parser) LoadPackagesWithConfigForTesting(cfg *packages.Config, patterns ...string) error {
|
||||
_, err := p.loadPackagesWithConfig(cfg, patterns...)
|
||||
return err
|
||||
}
|
||||
|
||||
// LoadPackagesTo loads and parses the specified Go packages, and inserts them
|
||||
// into the specified Universe. It returns the packages which match the
|
||||
// patterns, but loads all packages and their imports, recursively, into the
|
||||
// universe. See NewUniverse for more.
|
||||
func (p *Parser) LoadPackagesTo(u *types.Universe, patterns ...string) ([]*types.Package, error) {
|
||||
// Load Packages.
|
||||
pkgs, err := p.loadPackages(patterns...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Load types in all packages (it will internally filter).
|
||||
if err := p.addPkgsToUniverse(pkgs, u); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Return the results as gengo types.Packages.
|
||||
ret := make([]*types.Package, 0, len(pkgs))
|
||||
for _, pkg := range pkgs {
|
||||
ret = append(ret, u.Package(pkg.PkgPath))
|
||||
}
|
||||
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func (p *Parser) loadPackages(patterns ...string) ([]*packages.Package, error) {
|
||||
return p.loadPackagesWithConfig(nil, patterns...)
|
||||
}
|
||||
|
||||
// baseCfg is an optional (may be nil) config which might be injected by tests.
|
||||
func (p *Parser) loadPackagesWithConfig(baseCfg *packages.Config, patterns ...string) ([]*packages.Package, error) {
|
||||
klog.V(5).Infof("loadPackages %q", patterns)
|
||||
|
||||
// Loading packages is slow - only do ones we know we have not already done
|
||||
// (e.g. if a tool calls LoadPackages itself).
|
||||
existingPkgs, netNewPkgs, err := p.alreadyLoaded(baseCfg, patterns...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if vlog := klog.V(5); vlog.Enabled() {
|
||||
if len(existingPkgs) > 0 {
|
||||
keys := make([]string, 0, len(existingPkgs))
|
||||
for _, p := range existingPkgs {
|
||||
keys = append(keys, p.PkgPath)
|
||||
}
|
||||
vlog.Infof(" already have: %q", keys)
|
||||
}
|
||||
if len(netNewPkgs) > 0 {
|
||||
vlog.Infof(" to be loaded: %q", netNewPkgs)
|
||||
}
|
||||
}
|
||||
|
||||
// If these were not user-requested before, they are now.
|
||||
for _, pkg := range existingPkgs {
|
||||
if !p.userRequested[pkg.PkgPath] {
|
||||
p.userRequested[pkg.PkgPath] = true
|
||||
}
|
||||
}
|
||||
for _, pkg := range netNewPkgs {
|
||||
if !p.userRequested[pkg] {
|
||||
p.userRequested[pkg] = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(netNewPkgs) == 0 {
|
||||
return existingPkgs, nil
|
||||
}
|
||||
|
||||
cfg := packages.Config{
|
||||
Mode: packages.NeedName |
|
||||
packages.NeedFiles | packages.NeedImports | packages.NeedDeps |
|
||||
packages.NeedModule | packages.NeedTypes | packages.NeedSyntax,
|
||||
BuildFlags: []string{"-tags", strings.Join(p.buildTags, ",")},
|
||||
Fset: p.fset,
|
||||
Tests: false,
|
||||
}
|
||||
if baseCfg != nil {
|
||||
// This is to support tests, e.g. to inject a fake GOPATH or CWD.
|
||||
cfg.Dir = baseCfg.Dir
|
||||
cfg.Env = baseCfg.Env
|
||||
}
|
||||
|
||||
tBefore := time.Now()
|
||||
pkgs, err := packages.Load(&cfg, netNewPkgs...)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error loading packages: %w", err)
|
||||
}
|
||||
klog.V(5).Infof(" loaded %d pkg(s) in %v", len(pkgs), time.Since(tBefore))
|
||||
|
||||
// Handle any errors.
|
||||
collectErrors := func(pkg *packages.Package) error {
|
||||
var errs []error
|
||||
for _, e := range pkg.Errors {
|
||||
if e.Kind == packages.ListError || e.Kind == packages.ParseError {
|
||||
errs = append(errs, e)
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return fmt.Errorf("error(s) in %q:\n%w", pkg.PkgPath, errors.Join(errs...))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := forEachPackageRecursive(pkgs, collectErrors); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Finish integrating packages into our state.
|
||||
absorbPkg := func(pkg *packages.Package) error {
|
||||
p.goPkgs[pkg.PkgPath] = pkg
|
||||
|
||||
for _, f := range pkg.Syntax {
|
||||
for _, c := range f.Comments {
|
||||
// We need to do this on _every_ pkg, not just user-requested
|
||||
// ones, because some generators look at tags in other
|
||||
// packages.
|
||||
//
|
||||
// TODO: It would be nice if we only did this on user-requested
|
||||
// packages. The problem is that we don't always know which
|
||||
// other packages will need this information, and even when we
|
||||
// do we may have already loaded the package (as a transitive
|
||||
// dep) and might have stored pointers into it. Doing a
|
||||
// thorough "reload" without invalidating all those pointers is
|
||||
// a problem for another day.
|
||||
position := p.fset.Position(c.End()) // Fset is synchronized
|
||||
p.endLineToCommentGroup[fileLine{position.Filename, position.Line}] = c
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
if err := forEachPackageRecursive(pkgs, absorbPkg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return append(existingPkgs, pkgs...), nil
|
||||
}
|
||||
|
||||
// alreadyLoaded figures out which of the specified patterns have already been loaded
|
||||
// and which have not, and returns those respectively.
|
||||
// baseCfg is an optional (may be nil) config which might be injected by tests.
|
||||
func (p *Parser) alreadyLoaded(baseCfg *packages.Config, patterns ...string) ([]*packages.Package, []string, error) {
|
||||
existingPkgs := make([]*packages.Package, 0, len(patterns))
|
||||
netNewPkgs := make([]string, 0, len(patterns))
|
||||
|
||||
// Expand and canonicalize the requested patterns. This should be fast.
|
||||
if pkgPaths, err := p.findPackages(baseCfg, patterns...); err != nil {
|
||||
return nil, nil, err
|
||||
} else {
|
||||
for _, pkgPath := range pkgPaths {
|
||||
if pkg := p.goPkgs[pkgPath]; pkg != nil {
|
||||
existingPkgs = append(existingPkgs, pkg)
|
||||
} else {
|
||||
netNewPkgs = append(netNewPkgs, pkgPath)
|
||||
}
|
||||
}
|
||||
}
|
||||
return existingPkgs, netNewPkgs, nil
|
||||
}
|
||||
|
||||
// forEachPackageRecursive will run the provided function on all of the specified
|
||||
// packages, and on their imports recursively. Errors are accumulated and
|
||||
// returned via errors.Join.
|
||||
func forEachPackageRecursive(pkgs []*packages.Package, fn func(pkg *packages.Package) error) error {
|
||||
seen := map[string]bool{} // PkgPaths we have already visited
|
||||
var errs []error
|
||||
for _, pkg := range pkgs {
|
||||
errs = append(errs, recursePackage(pkg, fn, seen)...)
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func recursePackage(pkg *packages.Package, fn func(pkg *packages.Package) error, seen map[string]bool) []error {
|
||||
if seen[pkg.PkgPath] {
|
||||
return nil
|
||||
}
|
||||
var errs []error
|
||||
seen[pkg.PkgPath] = true
|
||||
if err := fn(pkg); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
for _, imp := range pkg.Imports {
|
||||
errs = append(errs, recursePackage(imp, fn, seen)...)
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
// UserRequestedPackages fetches a list of the user-imported packages.
|
||||
func (p *Parser) UserRequestedPackages() []string {
|
||||
// Iterate packages in a predictable order.
|
||||
pkgPaths := make([]string, 0, len(p.userRequested))
|
||||
for k := range p.userRequested {
|
||||
pkgPaths = append(pkgPaths, string(k))
|
||||
}
|
||||
sort.Strings(pkgPaths)
|
||||
return pkgPaths
|
||||
}
|
||||
|
||||
// NewUniverse finalizes the loaded packages, searches through them for types
|
||||
// and produces a new Universe. The returned Universe has one types.Package
|
||||
// entry for each Go package that has been loaded, including all of their
|
||||
// dependencies, recursively. It also has one entry, whose key is "", which
|
||||
// represents "builtin" types.
|
||||
func (p *Parser) NewUniverse() (types.Universe, error) {
|
||||
u := types.Universe{}
|
||||
|
||||
pkgs := []*packages.Package{}
|
||||
for _, path := range p.UserRequestedPackages() {
|
||||
pkgs = append(pkgs, p.goPkgs[path])
|
||||
}
|
||||
if err := p.addPkgsToUniverse(pkgs, &u); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return u, nil
|
||||
}
|
||||
|
||||
// addCommentsToType takes any accumulated comment lines prior to obj and
|
||||
// attaches them to the type t.
|
||||
func (p *Parser) addCommentsToType(obj gotypes.Object, t *types.Type) {
|
||||
t.CommentLines = p.docComment(obj.Pos())
|
||||
t.SecondClosestCommentLines = p.priorDetachedComment(obj.Pos())
|
||||
}
|
||||
|
||||
// packageDir tries to figure out the directory of the specified package.
|
||||
func packageDir(pkg *packages.Package) (string, error) {
|
||||
// Sometimes Module is present but has no Dir, e.g. when it is vendored.
|
||||
if pkg.Module != nil && pkg.Module.Dir != "" {
|
||||
// NOTE: this will not work if tests are loaded, because Go mutates the
|
||||
// Package.PkgPath.
|
||||
subdir := strings.TrimPrefix(pkg.PkgPath, pkg.Module.Path)
|
||||
return filepath.Join(pkg.Module.Dir, subdir), nil
|
||||
}
|
||||
if len(pkg.GoFiles) > 0 {
|
||||
return filepath.Dir(pkg.GoFiles[0]), nil
|
||||
}
|
||||
if len(pkg.IgnoredFiles) > 0 {
|
||||
return filepath.Dir(pkg.IgnoredFiles[0]), nil
|
||||
}
|
||||
return "", fmt.Errorf("can't find package dir for %q - no module info and no Go files", pkg.PkgPath)
|
||||
}
|
||||
|
||||
// addPkgsToUniverse adds the packages, and all of their deps, recursively, to
|
||||
// the universe and (if needed) searches through them for types.
|
||||
func (p *Parser) addPkgsToUniverse(pkgs []*packages.Package, u *types.Universe) error {
|
||||
addOne := func(pkg *packages.Package) error {
|
||||
if err := p.addPkgToUniverse(pkg, u); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := forEachPackageRecursive(pkgs, addOne); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// addPkgToUniverse adds one package to the universe and (if needed) searches
|
||||
// through it for types.
|
||||
func (p *Parser) addPkgToUniverse(pkg *packages.Package, u *types.Universe) error {
|
||||
pkgPath := pkg.PkgPath
|
||||
if p.fullyProcessed[pkgPath] {
|
||||
return nil
|
||||
}
|
||||
|
||||
// This will get-or-create the Package.
|
||||
gengoPkg := u.Package(pkgPath)
|
||||
|
||||
if gengoPkg.Dir == "" {
|
||||
// We're keeping this package, though we might not fully process it.
|
||||
if vlog := klog.V(5); vlog.Enabled() {
|
||||
why := "user-requested"
|
||||
if !p.userRequested[pkgPath] {
|
||||
why = "dependency"
|
||||
}
|
||||
vlog.Infof("addPkgToUniverse %q (%s)", pkgPath, why)
|
||||
}
|
||||
|
||||
absPath := ""
|
||||
if dir, err := packageDir(pkg); err != nil {
|
||||
return err
|
||||
} else {
|
||||
absPath = dir
|
||||
}
|
||||
|
||||
gengoPkg.Path = pkg.PkgPath
|
||||
gengoPkg.Dir = absPath
|
||||
}
|
||||
|
||||
// If the package was not user-requested, we can stop here.
|
||||
if !p.userRequested[pkgPath] {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Mark it as done, so we don't ever re-process it.
|
||||
p.fullyProcessed[pkgPath] = true
|
||||
gengoPkg.Name = pkg.Name
|
||||
|
||||
// For historical reasons we treat files named "doc.go" specially.
|
||||
// TODO: It would be nice to not do this and instead treat package
|
||||
// doc-comments as the "global" config place. This would require changing
|
||||
// most generators and input files.
|
||||
for _, f := range pkg.Syntax {
|
||||
// This gets the filename for the ast.File. Iterating pkg.GoFiles is
|
||||
// documented as unreliable.
|
||||
pos := p.fset.Position(f.FileStart)
|
||||
if filepath.Base(pos.Filename) == "doc.go" {
|
||||
gengoPkg.Comments = []string{}
|
||||
for i := range f.Comments {
|
||||
gengoPkg.Comments = append(gengoPkg.Comments, splitLines(f.Comments[i].Text())...)
|
||||
}
|
||||
if f.Doc != nil {
|
||||
gengoPkg.DocComments = splitLines(f.Doc.Text())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Walk all the types, recursively and save them for later access.
|
||||
s := pkg.Types.Scope()
|
||||
for _, n := range s.Names() {
|
||||
switch obj := s.Lookup(n).(type) {
|
||||
case *gotypes.TypeName:
|
||||
t := p.walkType(*u, nil, obj.Type())
|
||||
p.addCommentsToType(obj, t)
|
||||
case *gotypes.Func:
|
||||
// We only care about functions, not concrete/abstract methods.
|
||||
if obj.Type() != nil && obj.Type().(*gotypes.Signature).Recv() == nil {
|
||||
t := p.addFunction(*u, nil, obj)
|
||||
p.addCommentsToType(obj, t)
|
||||
}
|
||||
case *gotypes.Var:
|
||||
if !obj.IsField() {
|
||||
t := p.addVariable(*u, nil, obj)
|
||||
p.addCommentsToType(obj, t)
|
||||
}
|
||||
case *gotypes.Const:
|
||||
t := p.addConstant(*u, nil, obj)
|
||||
p.addCommentsToType(obj, t)
|
||||
default:
|
||||
klog.Infof("addPkgToUniverse %q: unhandled object of type %T: %v", pkgPath, obj, obj)
|
||||
}
|
||||
}
|
||||
|
||||
// Add all of this package's imports.
|
||||
importedPkgs := []string{}
|
||||
for _, imp := range pkg.Imports {
|
||||
if err := p.addPkgToUniverse(imp, u); err != nil {
|
||||
return err
|
||||
}
|
||||
importedPkgs = append(importedPkgs, imp.PkgPath)
|
||||
}
|
||||
sort.Strings(importedPkgs)
|
||||
u.AddImports(pkg.PkgPath, importedPkgs...)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// If the specified position has a "doc comment", return that.
|
||||
func (p *Parser) docComment(pos token.Pos) []string {
|
||||
// An object's doc comment always ends on the line before the object's own
|
||||
// declaration.
|
||||
c1 := p.priorCommentLines(pos, 1)
|
||||
return splitLines(c1.Text()) // safe even if c1 is nil
|
||||
}
|
||||
|
||||
// If there is a detached (not immediately before a declaration) comment,
|
||||
// return that.
|
||||
func (p *Parser) priorDetachedComment(pos token.Pos) []string {
|
||||
// An object's doc comment always ends on the line before the object's own
|
||||
// declaration.
|
||||
c1 := p.priorCommentLines(pos, 1)
|
||||
|
||||
// Using a literal "2" here is brittle in theory (it means literally 2
|
||||
// lines), but in practice Go code is gofmt'ed (which elides repeated blank
|
||||
// lines), so it works.
|
||||
var c2 *ast.CommentGroup
|
||||
if c1 == nil {
|
||||
c2 = p.priorCommentLines(pos, 2)
|
||||
} else {
|
||||
c2 = p.priorCommentLines(c1.List[0].Slash, 2)
|
||||
}
|
||||
return splitLines(c2.Text()) // safe even if c2 is nil
|
||||
}
|
||||
|
||||
// If there's a comment block which ends nlines before pos, return it.
|
||||
func (p *Parser) priorCommentLines(pos token.Pos, lines int) *ast.CommentGroup {
|
||||
position := p.fset.Position(pos)
|
||||
key := fileLine{position.Filename, position.Line - lines}
|
||||
return p.endLineToCommentGroup[key]
|
||||
}
|
||||
|
||||
func splitLines(str string) []string {
|
||||
return strings.Split(strings.TrimRight(str, "\n"), "\n")
|
||||
}
|
||||
|
||||
func goFuncNameToName(in string) types.Name {
|
||||
name := strings.TrimPrefix(in, "func ")
|
||||
nameParts := strings.Split(name, "(")
|
||||
return goNameToName(nameParts[0])
|
||||
}
|
||||
|
||||
func goVarNameToName(in string) types.Name {
|
||||
nameParts := strings.Split(in, " ")
|
||||
// nameParts[0] is "var".
|
||||
// nameParts[2:] is the type of the variable, we ignore it for now.
|
||||
return goNameToName(nameParts[1])
|
||||
}
|
||||
|
||||
func goNameToName(in string) types.Name {
|
||||
// Detect anonymous type names. (These may have '.' characters because
|
||||
// embedded types may have packages, so we detect them specially.)
|
||||
if strings.HasPrefix(in, "struct{") ||
|
||||
strings.HasPrefix(in, "<-chan") ||
|
||||
strings.HasPrefix(in, "chan<-") ||
|
||||
strings.HasPrefix(in, "chan ") ||
|
||||
strings.HasPrefix(in, "func(") ||
|
||||
strings.HasPrefix(in, "func (") ||
|
||||
strings.HasPrefix(in, "*") ||
|
||||
strings.HasPrefix(in, "map[") ||
|
||||
strings.HasPrefix(in, "[") {
|
||||
return types.Name{Name: in}
|
||||
}
|
||||
|
||||
// Otherwise, if there are '.' characters present, the name has a
|
||||
// package path in front.
|
||||
nameParts := strings.Split(in, ".")
|
||||
name := types.Name{Name: in}
|
||||
if n := len(nameParts); n >= 2 {
|
||||
// The final "." is the name of the type--previous ones must
|
||||
// have been in the package path.
|
||||
name.Package, name.Name = strings.Join(nameParts[:n-1], "."), nameParts[n-1]
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func (p *Parser) convertSignature(u types.Universe, t *gotypes.Signature) *types.Signature {
|
||||
signature := &types.Signature{}
|
||||
for i := 0; i < t.Params().Len(); i++ {
|
||||
signature.Parameters = append(signature.Parameters, p.walkType(u, nil, t.Params().At(i).Type()))
|
||||
signature.ParameterNames = append(signature.ParameterNames, t.Params().At(i).Name())
|
||||
}
|
||||
for i := 0; i < t.Results().Len(); i++ {
|
||||
signature.Results = append(signature.Results, p.walkType(u, nil, t.Results().At(i).Type()))
|
||||
signature.ResultNames = append(signature.ResultNames, t.Results().At(i).Name())
|
||||
}
|
||||
if r := t.Recv(); r != nil {
|
||||
signature.Receiver = p.walkType(u, nil, r.Type())
|
||||
}
|
||||
signature.Variadic = t.Variadic()
|
||||
return signature
|
||||
}
|
||||
|
||||
// walkType adds the type, and any necessary child types.
|
||||
func (p *Parser) walkType(u types.Universe, useName *types.Name, in gotypes.Type) *types.Type {
|
||||
// Most of the cases are underlying types of the named type.
|
||||
name := goNameToName(in.String())
|
||||
if useName != nil {
|
||||
name = *useName
|
||||
}
|
||||
|
||||
switch t := in.(type) {
|
||||
case *gotypes.Struct:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Struct
|
||||
for i := 0; i < t.NumFields(); i++ {
|
||||
f := t.Field(i)
|
||||
m := types.Member{
|
||||
Name: f.Name(),
|
||||
Embedded: f.Anonymous(),
|
||||
Tags: t.Tag(i),
|
||||
Type: p.walkType(u, nil, f.Type()),
|
||||
CommentLines: p.docComment(f.Pos()),
|
||||
}
|
||||
out.Members = append(out.Members, m)
|
||||
}
|
||||
return out
|
||||
case *gotypes.Map:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Map
|
||||
out.Elem = p.walkType(u, nil, t.Elem())
|
||||
out.Key = p.walkType(u, nil, t.Key())
|
||||
return out
|
||||
case *gotypes.Pointer:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Pointer
|
||||
out.Elem = p.walkType(u, nil, t.Elem())
|
||||
return out
|
||||
case *gotypes.Slice:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Slice
|
||||
out.Elem = p.walkType(u, nil, t.Elem())
|
||||
return out
|
||||
case *gotypes.Array:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Array
|
||||
out.Elem = p.walkType(u, nil, t.Elem())
|
||||
out.Len = in.(*gotypes.Array).Len()
|
||||
return out
|
||||
case *gotypes.Chan:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Chan
|
||||
out.Elem = p.walkType(u, nil, t.Elem())
|
||||
// TODO: need to store direction, otherwise raw type name
|
||||
// cannot be properly written.
|
||||
return out
|
||||
case *gotypes.Basic:
|
||||
out := u.Type(types.Name{
|
||||
Package: "", // This is a magic package name in the Universe.
|
||||
Name: t.Name(),
|
||||
})
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Unsupported
|
||||
return out
|
||||
case *gotypes.Signature:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Func
|
||||
out.Signature = p.convertSignature(u, t)
|
||||
return out
|
||||
case *gotypes.Interface:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Interface
|
||||
t.Complete()
|
||||
for i := 0; i < t.NumMethods(); i++ {
|
||||
if out.Methods == nil {
|
||||
out.Methods = map[string]*types.Type{}
|
||||
}
|
||||
method := t.Method(i)
|
||||
name := goNameToName(method.String())
|
||||
mt := p.walkType(u, &name, method.Type())
|
||||
mt.CommentLines = p.docComment(method.Pos())
|
||||
out.Methods[method.Name()] = mt
|
||||
}
|
||||
return out
|
||||
case *gotypes.Named:
|
||||
var out *types.Type
|
||||
switch t.Underlying().(type) {
|
||||
case *gotypes.Named, *gotypes.Basic, *gotypes.Map, *gotypes.Slice:
|
||||
name := goNameToName(t.String())
|
||||
out = u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Alias
|
||||
out.Underlying = p.walkType(u, nil, t.Underlying())
|
||||
default:
|
||||
// gotypes package makes everything "named" with an
|
||||
// underlying anonymous type--we remove that annoying
|
||||
// "feature" for users. This flattens those types
|
||||
// together.
|
||||
name := goNameToName(t.String())
|
||||
if out := u.Type(name); out.Kind != types.Unknown {
|
||||
return out // short circuit if we've already made this.
|
||||
}
|
||||
out = p.walkType(u, &name, t.Underlying())
|
||||
}
|
||||
// If the underlying type didn't already add methods, add them.
|
||||
// (Interface types will have already added methods.)
|
||||
if len(out.Methods) == 0 {
|
||||
for i := 0; i < t.NumMethods(); i++ {
|
||||
if out.Methods == nil {
|
||||
out.Methods = map[string]*types.Type{}
|
||||
}
|
||||
method := t.Method(i)
|
||||
name := goNameToName(method.String())
|
||||
mt := p.walkType(u, &name, method.Type())
|
||||
mt.CommentLines = p.docComment(method.Pos())
|
||||
out.Methods[method.Name()] = mt
|
||||
}
|
||||
}
|
||||
return out
|
||||
default:
|
||||
out := u.Type(name)
|
||||
if out.Kind != types.Unknown {
|
||||
return out
|
||||
}
|
||||
out.Kind = types.Unsupported
|
||||
klog.Warningf("Making unsupported type entry %q for: %#v\n", out, t)
|
||||
return out
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Parser) addFunction(u types.Universe, useName *types.Name, in *gotypes.Func) *types.Type {
|
||||
name := goFuncNameToName(in.String())
|
||||
if useName != nil {
|
||||
name = *useName
|
||||
}
|
||||
out := u.Function(name)
|
||||
out.Kind = types.DeclarationOf
|
||||
out.Underlying = p.walkType(u, nil, in.Type())
|
||||
return out
|
||||
}
|
||||
|
||||
func (p *Parser) addVariable(u types.Universe, useName *types.Name, in *gotypes.Var) *types.Type {
|
||||
name := goVarNameToName(in.String())
|
||||
if useName != nil {
|
||||
name = *useName
|
||||
}
|
||||
out := u.Variable(name)
|
||||
out.Kind = types.DeclarationOf
|
||||
out.Underlying = p.walkType(u, nil, in.Type())
|
||||
return out
|
||||
}
|
||||
|
||||
func (p *Parser) addConstant(u types.Universe, useName *types.Name, in *gotypes.Const) *types.Type {
|
||||
name := goVarNameToName(in.String())
|
||||
if useName != nil {
|
||||
name = *useName
|
||||
}
|
||||
out := u.Constant(name)
|
||||
out.Kind = types.DeclarationOf
|
||||
out.Underlying = p.walkType(u, nil, in.Type())
|
||||
|
||||
var constval string
|
||||
|
||||
// For strings, we use `StringVal()` to get the un-truncated,
|
||||
// un-quoted string. For other values, `.String()` is preferable to
|
||||
// get something relatively human readable (especially since for
|
||||
// floating point types, `ExactString()` will generate numeric
|
||||
// expressions using `big.(*Float).Text()`).
|
||||
switch in.Val().Kind() {
|
||||
case constant.String:
|
||||
constval = constant.StringVal(in.Val())
|
||||
default:
|
||||
constval = in.Val().String()
|
||||
}
|
||||
|
||||
out.ConstValue = &constval
|
||||
return out
|
||||
}
|
||||
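Taken together, the new parser package exposes a small pipeline: construct a Parser, load packages, then build a types.Universe. A minimal usage sketch, assuming it is driven from inside a Go module; the package pattern and build tag are illustrative.

// Illustrative only; not part of this commit.
package main

import (
	"fmt"

	"k8s.io/gengo/v2/parser"
)

func main() {
	p := parser.NewWithOptions(parser.Options{BuildTags: []string{"ignore_autogenerated"}})
	if err := p.LoadPackages("./pkg/..."); err != nil { // hypothetical pattern
		panic(err)
	}
	u, err := p.NewUniverse() // one types.Package per loaded Go package
	if err != nil {
		panic(err)
	}
	for _, pkgPath := range p.UserRequestedPackages() {
		fmt.Printf("%s: %d types\n", pkgPath, len(u.Package(pkgPath).Types))
	}
}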
19
vendor/k8s.io/gengo/v2/types/doc.go
generated
vendored
Normal file
19
vendor/k8s.io/gengo/v2/types/doc.go
generated
vendored
Normal file
@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package types contains go type information, packaged in a way that makes
// auto-generation convenient, whether by template or straight go functions.
package types // import "k8s.io/gengo/v2/types"
539
vendor/k8s.io/gengo/v2/types/types.go
generated
vendored
Normal file
539
vendor/k8s.io/gengo/v2/types/types.go
generated
vendored
Normal file
@@ -0,0 +1,539 @@
|
||||
/*
|
||||
Copyright 2015 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package types
|
||||
|
||||
import "strings"
|
||||
|
||||
// Ref makes a reference to the given type. It can only be used for e.g.
|
||||
// passing to namers.
|
||||
func Ref(packageName, typeName string) *Type {
|
||||
return &Type{Name: Name{
|
||||
Name: typeName,
|
||||
Package: packageName,
|
||||
}}
|
||||
}
|
||||
|
||||
// A type name may have a package qualifier.
|
||||
type Name struct {
|
||||
// Empty if embedded or builtin. This is the package path unless Path is specified.
|
||||
Package string
|
||||
// The type name.
|
||||
Name string
|
||||
// An optional location of the type definition for languages that can have disjoint
|
||||
// packages and paths.
|
||||
Path string
|
||||
}
|
||||
|
||||
// String returns the name formatted as a string.
|
||||
func (n Name) String() string {
|
||||
if n.Package == "" {
|
||||
return n.Name
|
||||
}
|
||||
return n.Package + "." + n.Name
|
||||
}
|
||||
|
||||
// ParseFullyQualifiedName parses a name like k8s.io/kubernetes/pkg/api.Pod into a Name.
|
||||
func ParseFullyQualifiedName(fqn string) Name {
|
||||
cs := strings.Split(fqn, ".")
|
||||
pkg := ""
|
||||
if len(cs) > 1 {
|
||||
pkg = strings.Join(cs[0:len(cs)-1], ".")
|
||||
}
|
||||
return Name{
|
||||
Name: cs[len(cs)-1],
|
||||
Package: pkg,
|
||||
}
|
||||
}
|
||||
|
||||
// The possible classes of types.
|
||||
type Kind string
|
||||
|
||||
const (
|
||||
// Builtin is a primitive, like bool, string, int.
|
||||
Builtin Kind = "Builtin"
|
||||
Struct Kind = "Struct"
|
||||
Map Kind = "Map"
|
||||
Slice Kind = "Slice"
|
||||
Pointer Kind = "Pointer"
|
||||
|
||||
// Alias is an alias of another type, e.g. in:
|
||||
// type Foo string
|
||||
// type Bar Foo
|
||||
// Bar is an alias of Foo.
|
||||
//
|
||||
// In the real go type system, Foo is a "Named" string; but to simplify
|
||||
// generation, this type system will just say that Foo *is* a builtin.
|
||||
// We then need "Alias" as a way for us to say that Bar *is* a Foo.
|
||||
Alias Kind = "Alias"
|
||||
|
||||
// Interface is any type that could have differing types at run time.
|
||||
Interface Kind = "Interface"
|
||||
|
||||
// Array is just like slice, but has a fixed length.
|
||||
Array Kind = "Array"
|
||||
|
||||
// The remaining types are included for completeness, but are not well
|
||||
// supported.
|
||||
Chan Kind = "Chan"
|
||||
Func Kind = "Func"
|
||||
|
||||
// DeclarationOf is different from other Kinds; it indicates that instead of
|
||||
// representing an actual Type, the type is a declaration of an instance of
|
||||
// a type. E.g., a top-level function, variable, or constant. See the
|
||||
// comment for Type.Name for more detail.
|
||||
DeclarationOf Kind = "DeclarationOf"
|
||||
Unknown Kind = ""
|
||||
Unsupported Kind = "Unsupported"
|
||||
|
||||
// Protobuf is protobuf type.
|
||||
Protobuf Kind = "Protobuf"
|
||||
)
|
||||
|
||||
// Package holds package-level information.
|
||||
// Fields are public, as everything in this package, to enable consumption by
|
||||
// templates (for example). But it is strongly encouraged for code to build by
|
||||
// using the provided functions.
|
||||
type Package struct {
|
||||
// Canonical import-path of this package.
|
||||
Path string
|
||||
|
||||
// The location (on disk) of this package.
|
||||
Dir string
|
||||
|
||||
// Short name of this package, as in the 'package x' line.
|
||||
Name string
|
||||
|
||||
// The comment right above the package declaration in doc.go, if any.
|
||||
DocComments []string
|
||||
|
||||
// All comments from doc.go, if any.
|
||||
// TODO: remove Comments and use DocComments everywhere.
|
||||
Comments []string
|
||||
|
||||
// Types within this package, indexed by their name (*not* including
|
||||
// package name).
|
||||
Types map[string]*Type
|
||||
|
||||
// Functions within this package, indexed by their name (*not* including
|
||||
// package name).
|
||||
Functions map[string]*Type
|
||||
|
||||
// Global variables within this package, indexed by their name (*not* including
|
||||
// package name).
|
||||
Variables map[string]*Type
|
||||
|
||||
// Global constants within this package, indexed by their name (*not* including
|
||||
// package name).
|
||||
Constants map[string]*Type
|
||||
|
||||
// Packages imported by this package, indexed by (canonicalized)
|
||||
// package path.
|
||||
Imports map[string]*Package
|
||||
}
|
||||
|
||||
// Has returns true if the given name references a type known to this package.
|
||||
func (p *Package) Has(name string) bool {
|
||||
_, has := p.Types[name]
|
||||
return has
|
||||
}
|
||||
|
||||
// Type gets the given Type in this Package. If the Type is not already
|
||||
// defined, this will add it and return the new Type value. The caller is
|
||||
// expected to finish initialization.
|
||||
func (p *Package) Type(typeName string) *Type {
|
||||
if t, ok := p.Types[typeName]; ok {
|
||||
return t
|
||||
}
|
||||
if p.Path == "" {
|
||||
// Import the standard builtin types!
|
||||
if t, ok := builtins.Types[typeName]; ok {
|
||||
p.Types[typeName] = t
|
||||
return t
|
||||
}
|
||||
}
|
||||
t := &Type{Name: Name{Package: p.Path, Name: typeName}}
|
||||
p.Types[typeName] = t
|
||||
return t
|
||||
}
|
||||
|
||||
// Function gets the given function Type in this Package. If the function is
|
||||
// not already defined, this will add it. If a function is added, it's the
|
||||
// caller's responsibility to finish construction of the function by setting
|
||||
// Underlying to the correct type.
|
||||
func (p *Package) Function(funcName string) *Type {
|
||||
if t, ok := p.Functions[funcName]; ok {
|
||||
return t
|
||||
}
|
||||
t := &Type{Name: Name{Package: p.Path, Name: funcName}}
|
||||
t.Kind = DeclarationOf
|
||||
p.Functions[funcName] = t
|
||||
return t
|
||||
}
|
||||
|
||||
// Variable gets the given variable Type in this Package. If the variable is
|
||||
// not already defined, this will add it. If a variable is added, it's the caller's
|
||||
// responsibility to finish construction of the variable by setting Underlying
|
||||
// to the correct type.
|
||||
func (p *Package) Variable(varName string) *Type {
|
||||
if t, ok := p.Variables[varName]; ok {
|
||||
return t
|
||||
}
|
||||
t := &Type{Name: Name{Package: p.Path, Name: varName}}
|
||||
t.Kind = DeclarationOf
|
||||
p.Variables[varName] = t
|
||||
return t
|
||||
}
|
||||
|
||||
// Constant gets the given constant Type in this Package. If the constant is
|
||||
// not already defined, this will add it. If a constant is added, it's the caller's
|
||||
// responsibility to finish construction of the constant by setting Underlying
|
||||
// to the correct type.
|
||||
func (p *Package) Constant(constName string) *Type {
|
||||
if t, ok := p.Constants[constName]; ok {
|
||||
return t
|
||||
}
|
||||
t := &Type{Name: Name{Package: p.Path, Name: constName}}
|
||||
t.Kind = DeclarationOf
|
||||
p.Constants[constName] = t
|
||||
return t
|
||||
}
|
||||
|
||||
// HasImport returns true if p imports packageName. Package names include the
|
||||
// package directory.
|
||||
func (p *Package) HasImport(packageName string) bool {
|
||||
_, has := p.Imports[packageName]
|
||||
return has
|
||||
}
|
||||
|
||||
// Universe is a map of all packages. The key is the package name, but you
|
||||
// should use Package(), Type(), Function(), or Variable() instead of direct
|
||||
// access.
|
||||
type Universe map[string]*Package
|
||||
|
||||
// Type returns the canonical type for the given fully-qualified name. Builtin
|
||||
// types will always be found, even if they haven't been explicitly added to
|
||||
// the map. If a non-existing type is requested, this will create (a marker for)
|
||||
// it.
|
||||
func (u Universe) Type(n Name) *Type {
|
||||
return u.Package(n.Package).Type(n.Name)
|
||||
}
|
||||
|
||||
// Function returns the canonical function for the given fully-qualified name.
|
||||
// If a non-existing function is requested, this will create (a marker for) it.
|
||||
// If a marker is created, it's the caller's responsibility to finish
|
||||
// construction of the function by setting Underlying to the correct type.
|
||||
func (u Universe) Function(n Name) *Type {
|
||||
return u.Package(n.Package).Function(n.Name)
|
||||
}
|
||||
|
||||
// Variable returns the canonical variable for the given fully-qualified name.
|
||||
// If a non-existing variable is requested, this will create (a marker for) it.
|
||||
// If a marker is created, it's the caller's responsibility to finish
|
||||
// construction of the variable by setting Underlying to the correct type.
|
||||
func (u Universe) Variable(n Name) *Type {
|
||||
return u.Package(n.Package).Variable(n.Name)
|
||||
}
|
||||
|
||||
// Constant returns the canonical constant for the given fully-qualified name.
|
||||
// If a non-existing constant is requested, this will create (a marker for) it.
|
||||
// If a marker is created, it's the caller's responsibility to finish
|
||||
// construction of the constant by setting Underlying to the correct type.
|
||||
func (u Universe) Constant(n Name) *Type {
|
||||
return u.Package(n.Package).Constant(n.Name)
|
||||
}
|
||||
|
||||
// AddImports registers import lines for packageName. May be called multiple times.
|
||||
// You are responsible for canonicalizing all package paths.
|
||||
func (u Universe) AddImports(packagePath string, importPaths ...string) {
|
||||
p := u.Package(packagePath)
|
||||
for _, i := range importPaths {
|
||||
p.Imports[i] = u.Package(i)
|
||||
}
|
||||
}
|
||||
|
||||
// Package returns the Package for the given path.
|
||||
// If a non-existing package is requested, this will create (a marker for) it.
|
||||
// If a marker is created, it's the caller's responsibility to finish
|
||||
// construction of the package.
|
||||
func (u Universe) Package(packagePath string) *Package {
|
||||
if p, ok := u[packagePath]; ok {
|
||||
return p
|
||||
}
|
||||
p := &Package{
|
||||
Path: packagePath,
|
||||
Types: map[string]*Type{},
|
||||
Functions: map[string]*Type{},
|
||||
Variables: map[string]*Type{},
|
||||
Constants: map[string]*Type{},
|
||||
Imports: map[string]*Package{},
|
||||
}
|
||||
u[packagePath] = p
|
||||
return p
|
||||
}
|
||||
|
||||
// Type represents a subset of possible go types.
|
||||
type Type struct {
|
||||
// There are two general categories of types, those explicitly named
|
||||
// and those anonymous. Named ones will have a non-empty package in the
|
||||
// name field.
|
||||
//
|
||||
// An exception: If Kind == DeclarationOf, then this name is the name of a
|
||||
// top-level function, variable, or const, and the type can be found in Underlying.
|
||||
// We do this to allow the naming system to work against these objects, even
|
||||
// though they aren't strictly speaking types.
|
||||
Name Name
|
||||
|
||||
// The general kind of this type.
|
||||
Kind Kind
|
||||
|
||||
// If there are comment lines immediately before the type definition,
|
||||
// they will be recorded here.
|
||||
CommentLines []string
|
||||
|
||||
// If there are comment lines preceding the `CommentLines`, they will be
|
||||
// recorded here. There are two cases:
|
||||
// ---
|
||||
// SecondClosestCommentLines
|
||||
// a blank line
|
||||
// CommentLines
|
||||
// type definition
|
||||
// ---
|
||||
//
|
||||
// or
|
||||
// ---
|
||||
// SecondClosestCommentLines
|
||||
// a blank line
|
||||
// type definition
|
||||
// ---
|
||||
SecondClosestCommentLines []string
|
||||
|
||||
// If Kind == Struct
|
||||
Members []Member
|
||||
|
||||
// If Kind == Map, Slice, Pointer, or Chan
|
||||
Elem *Type
|
||||
|
||||
// If Kind == Map, this is the map's key type.
|
||||
Key *Type
|
||||
|
||||
// If Kind == Alias, this is the underlying type.
|
||||
// If Kind == DeclarationOf, this is the type of the declaration.
|
||||
Underlying *Type
|
||||
|
||||
// If Kind == Interface, this is the set of all required functions.
|
||||
// Otherwise, if this is a named type, this is the list of methods that
|
||||
// type has. (All elements will have Kind=="Func")
|
||||
Methods map[string]*Type
|
||||
|
||||
// If Kind == func, this is the signature of the function.
|
||||
Signature *Signature
|
||||
|
||||
// ConstValue contains a stringified constant value if
|
||||
// Kind == DeclarationOf and this is a constant value
|
||||
// declaration. For string constants, this field contains
|
||||
// the entire, un-quoted value. For other types, it contains
|
||||
// a human-readable literal.
|
||||
ConstValue *string
|
||||
|
||||
// TODO: Add:
|
||||
// * channel direction
|
||||
|
||||
// If Kind == Array
|
||||
Len int64
|
||||
}
|
||||
|
||||
// String returns the name of the type.
|
||||
func (t *Type) String() string {
|
||||
if t == nil {
|
||||
return "" // makes tests easier
|
||||
}
|
||||
return t.Name.String()
|
||||
}
|
||||
|
||||
// IsPrimitive returns whether the type is a built-in type or is an alias to a
|
||||
// built-in type. For example: strings and aliases of strings are primitives,
|
||||
// structs are not.
|
||||
func (t *Type) IsPrimitive() bool {
|
||||
if t.Kind == Builtin || (t.Kind == Alias && t.Underlying.Kind == Builtin) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsAssignable returns whether the type is deep-assignable. For example,
|
||||
// slices and maps and pointers are shallow copies, but ints and strings are
|
||||
// complete.
|
||||
func (t *Type) IsAssignable() bool {
|
||||
if t.IsPrimitive() {
|
||||
return true
|
||||
}
|
||||
if t.Kind == Struct {
|
||||
for _, m := range t.Members {
|
||||
if !m.Type.IsAssignable() {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsAnonymousStruct returns true if the type is an anonymous struct or an alias
|
||||
// to an anonymous struct.
|
||||
func (t *Type) IsAnonymousStruct() bool {
|
||||
return (t.Kind == Struct && t.Name.Name == "struct{}") || (t.Kind == Alias && t.Underlying.IsAnonymousStruct())
|
||||
}
|
||||
|
||||
// A single struct member
|
||||
type Member struct {
|
||||
// The name of the member.
|
||||
Name string
|
||||
|
||||
// If the member is embedded (anonymous) this will be true, and the
|
||||
// Name will be the type name.
|
||||
Embedded bool
|
||||
|
||||
// If there are comment lines immediately before the member in the type
|
||||
// definition, they will be recorded here.
|
||||
CommentLines []string
|
||||
|
||||
// If there are tags along with this member, they will be saved here.
|
||||
Tags string
|
||||
|
||||
// The type of this member.
|
||||
Type *Type
|
||||
}
|
||||
|
||||
// String returns the name and type of the member.
|
||||
func (m Member) String() string {
|
||||
return m.Name + " " + m.Type.String()
|
||||
}
|
||||
|
||||
// Signature is a function's signature.
|
||||
type Signature struct {
|
||||
// If a method of some type, this is the type it's a member of.
|
||||
Receiver *Type
|
||||
Parameters []*Type
|
||||
ParameterNames []string
|
||||
Results []*Type
|
||||
ResultNames []string
|
||||
|
||||
// True if the last in parameter is of the form ...T.
|
||||
Variadic bool
|
||||
|
||||
// If there are comment lines immediately before this
|
||||
// signature/method/function declaration, they will be recorded here.
|
||||
CommentLines []string
|
||||
}
|
||||
|
||||
// Built in types.
|
||||
var (
|
||||
String = &Type{
|
||||
Name: Name{Name: "string"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Int64 = &Type{
|
||||
Name: Name{Name: "int64"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Int32 = &Type{
|
||||
Name: Name{Name: "int32"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Int16 = &Type{
|
||||
Name: Name{Name: "int16"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Int = &Type{
|
||||
Name: Name{Name: "int"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Uint64 = &Type{
|
||||
Name: Name{Name: "uint64"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Uint32 = &Type{
|
||||
Name: Name{Name: "uint32"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Uint16 = &Type{
|
||||
Name: Name{Name: "uint16"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Uint = &Type{
|
||||
Name: Name{Name: "uint"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Uintptr = &Type{
|
||||
Name: Name{Name: "uintptr"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Float64 = &Type{
|
||||
Name: Name{Name: "float64"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Float32 = &Type{
|
||||
Name: Name{Name: "float32"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Float = &Type{
|
||||
Name: Name{Name: "float"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Bool = &Type{
|
||||
Name: Name{Name: "bool"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
Byte = &Type{
|
||||
Name: Name{Name: "byte"},
|
||||
Kind: Builtin,
|
||||
}
|
||||
|
||||
builtins = &Package{
|
||||
Types: map[string]*Type{
|
||||
"bool": Bool,
|
||||
"string": String,
|
||||
"int": Int,
|
||||
"int64": Int64,
|
||||
"int32": Int32,
|
||||
"int16": Int16,
|
||||
"int8": Byte,
|
||||
"uint": Uint,
|
||||
"uint64": Uint64,
|
||||
"uint32": Uint32,
|
||||
"uint16": Uint16,
|
||||
"uint8": Byte,
|
||||
"uintptr": Uintptr,
|
||||
"byte": Byte,
|
||||
"float": Float,
|
||||
"float64": Float64,
|
||||
"float32": Float32,
|
||||
},
|
||||
Imports: map[string]*Package{},
|
||||
Path: "",
|
||||
Name: "",
|
||||
}
|
||||
)
|
||||
|
||||
func IsInteger(t *Type) bool {
|
||||
switch t {
|
||||
case Int, Int64, Int32, Int16, Uint, Uint64, Uint32, Uint16, Byte:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
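The types model above is deliberately simple: a Universe of Packages, each holding marker Types that callers finish constructing. A small hypothetical sketch of how a generator might populate and query it; the package path and type name are invented.

// Illustrative only; not part of this commit.
package main

import (
	"fmt"

	"k8s.io/gengo/v2/types"
)

func main() {
	u := types.Universe{}
	pkg := u.Package("example.com/demo") // creates a marker Package on first use
	widget := pkg.Type("Widget")         // creates a marker Type; caller finishes it
	widget.Kind = types.Struct
	widget.Members = []types.Member{{Name: "Name", Type: types.String}}

	fmt.Println(widget)                       // example.com/demo.Widget
	fmt.Println(widget.IsPrimitive())         // false: structs are not primitives
	fmt.Println(widget.IsAssignable())        // true: every member is a primitive
	fmt.Println(types.IsInteger(types.Int64)) // true
}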
64
vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go
generated
vendored
64
vendor/k8s.io/kube-openapi/cmd/openapi-gen/args/args.go
generated
vendored
@@ -18,59 +18,61 @@ package args
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
"k8s.io/gengo/args"
|
||||
)
|
||||
|
||||
// CustomArgs is used by the gengo framework to pass args specific to this generator.
|
||||
type CustomArgs struct {
|
||||
// ReportFilename is added to CustomArgs for specifying name of report file used
|
||||
type Args struct {
|
||||
OutputDir string // must be a directory path
|
||||
OutputPkg string // must be a Go import-path
|
||||
OutputFile string
|
||||
|
||||
GoHeaderFile string
|
||||
|
||||
// ReportFilename is added to Args for specifying name of report file used
|
||||
// by API linter. If specified, API rule violations will be printed to report file.
|
||||
// Otherwise default value "-" will be used which indicates stdout.
|
||||
ReportFilename string
|
||||
}
|
||||
|
||||
// NewDefaults returns default arguments for the generator. Returning the arguments instead
|
||||
// New returns default arguments for the generator. Returning the arguments instead
|
||||
// of using default flag parsing allows registering custom arguments afterwards
|
||||
func NewDefaults() (*args.GeneratorArgs, *CustomArgs) {
|
||||
// Default() sets a couple of flag default values for example the boilerplate.
|
||||
// WithoutDefaultFlagParsing() disables implicit addition of command line flags and parsing,
|
||||
// which allows registering custom arguments afterwards
|
||||
genericArgs := args.Default().WithoutDefaultFlagParsing()
|
||||
genericArgs.GoHeaderFilePath = filepath.Join(args.DefaultSourceTree(), "k8s.io/kube-openapi/boilerplate/boilerplate.go.txt")
|
||||
|
||||
customArgs := &CustomArgs{}
|
||||
genericArgs.CustomArgs = customArgs
|
||||
func New() *Args {
|
||||
args := &Args{}
|
||||
|
||||
// Default value for report filename is "-", which stands for stdout
|
||||
customArgs.ReportFilename = "-"
|
||||
// Default value for output file base name
|
||||
genericArgs.OutputFileBaseName = "openapi_generated"
|
||||
args.ReportFilename = "-"
|
||||
|
||||
return genericArgs, customArgs
|
||||
return args
|
||||
}
|
||||
|
||||
// AddFlags add the generator flags to the flag set.
|
||||
func (c *CustomArgs) AddFlags(fs *pflag.FlagSet) {
|
||||
fs.StringVarP(&c.ReportFilename, "report-filename", "r", c.ReportFilename, "Name of report file used by API linter to print API violations. Default \"-\" stands for standard output. NOTE that if valid filename other than \"-\" is specified, API linter won't return error on detected API violations. This allows further check of existing API violations without stopping the OpenAPI generation toolchain.")
|
||||
func (args *Args) AddFlags(fs *pflag.FlagSet) {
|
||||
fs.StringVar(&args.OutputDir, "output-dir", "",
|
||||
"the base directory under which to generate results")
|
||||
fs.StringVar(&args.OutputPkg, "output-pkg", "",
|
||||
"the base Go import-path under which to generate results")
|
||||
fs.StringVar(&args.OutputFile, "output-file", "generated.openapi.go",
|
||||
"the name of the file to be generated")
|
||||
fs.StringVar(&args.GoHeaderFile, "go-header-file", "",
|
||||
"the path to a file containing boilerplate header text; the string \"YEAR\" will be replaced with the current 4-digit year")
|
||||
fs.StringVarP(&args.ReportFilename, "report-filename", "r", args.ReportFilename,
|
||||
"Name of report file used by API linter to print API violations. Default \"-\" stands for standard output. NOTE that if valid filename other than \"-\" is specified, API linter won't return error on detected API violations. This allows further check of existing API violations without stopping the OpenAPI generation toolchain.")
|
||||
}
|
||||
|
||||
// Validate checks the given arguments.
|
||||
func Validate(genericArgs *args.GeneratorArgs) error {
|
||||
c, ok := genericArgs.CustomArgs.(*CustomArgs)
|
||||
if !ok {
|
||||
return fmt.Errorf("input arguments don't contain valid custom arguments")
|
||||
func (args *Args) Validate() error {
|
||||
if len(args.OutputDir) == 0 {
|
||||
return fmt.Errorf("--output-dir must be specified")
|
||||
}
|
||||
if len(c.ReportFilename) == 0 {
|
||||
return fmt.Errorf("report filename cannot be empty. specify a valid filename or use \"-\" for stdout")
|
||||
if len(args.OutputPkg) == 0 {
|
||||
return fmt.Errorf("--output-pkg must be specified")
|
||||
}
|
||||
if len(genericArgs.OutputFileBaseName) == 0 {
|
||||
return fmt.Errorf("output file base name cannot be empty")
|
||||
if len(args.OutputFile) == 0 {
|
||||
return fmt.Errorf("--output-file must be specified")
|
||||
}
|
||||
if len(genericArgs.OutputPackagePath) == 0 {
|
||||
return fmt.Errorf("output package cannot be empty")
|
||||
if len(args.ReportFilename) == 0 {
|
||||
return fmt.Errorf("--report-filename must be specified (use \"-\" for stdout)")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
25
vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go
generated
vendored
25
vendor/k8s.io/kube-openapi/cmd/openapi-gen/openapi-gen.go
generated
vendored
@@ -24,33 +24,38 @@ import (
|
||||
"flag"
|
||||
"log"
|
||||
|
||||
generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args"
|
||||
"k8s.io/kube-openapi/pkg/generators"
|
||||
|
||||
"github.com/spf13/pflag"
|
||||
|
||||
"k8s.io/gengo/v2"
|
||||
"k8s.io/gengo/v2/generator"
|
||||
"k8s.io/klog/v2"
|
||||
"k8s.io/kube-openapi/cmd/openapi-gen/args"
|
||||
"k8s.io/kube-openapi/pkg/generators"
|
||||
)
|
||||
|
||||
func main() {
|
||||
klog.InitFlags(nil)
|
||||
genericArgs, customArgs := generatorargs.NewDefaults()
|
||||
args := args.New()
|
||||
|
||||
genericArgs.AddFlags(pflag.CommandLine)
|
||||
customArgs.AddFlags(pflag.CommandLine)
|
||||
args.AddFlags(pflag.CommandLine)
|
||||
flag.Set("logtostderr", "true")
|
||||
pflag.CommandLine.AddGoFlagSet(flag.CommandLine)
|
||||
pflag.Parse()
|
||||
|
||||
if err := generatorargs.Validate(genericArgs); err != nil {
|
||||
if err := args.Validate(); err != nil {
|
||||
log.Fatalf("Arguments validation error: %v", err)
|
||||
}
|
||||
|
||||
myTargets := func(context *generator.Context) []generator.Target {
|
||||
return generators.GetTargets(context, args)
|
||||
}
|
||||
|
||||
// Generates the code for the OpenAPIDefinitions.
|
||||
if err := genericArgs.Execute(
|
||||
if err := gengo.Execute(
|
||||
generators.NameSystems(),
|
||||
generators.DefaultNameSystem(),
|
||||
generators.Packages,
|
||||
myTargets,
|
||||
gengo.StdBuildTag,
|
||||
pflag.Args(),
|
||||
); err != nil {
|
||||
log.Fatalf("OpenAPI code generation error: %v", err)
|
||||
}
|
||||
|
||||
3
vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go
generated
vendored
3
vendor/k8s.io/kube-openapi/pkg/builder3/openapi.go
generated
vendored
@@ -326,6 +326,9 @@ func BuildOpenAPISpecFromRoutes(webServices []common.RouteContainer, config *com
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if config.PostProcessSpec != nil {
|
||||
return config.PostProcessSpec(a.spec)
|
||||
}
|
||||
return a.spec, nil
|
||||
}
|
||||
|
||||
|
||||
3
vendor/k8s.io/kube-openapi/pkg/common/common.go
generated
vendored
3
vendor/k8s.io/kube-openapi/pkg/common/common.go
generated
vendored
@@ -164,6 +164,9 @@ type OpenAPIV3Config struct {
|
||||
// It is an optional function to customize model names.
|
||||
GetDefinitionName func(name string) (string, spec.Extensions)
|
||||
|
||||
// PostProcessSpec runs after the spec is ready to serve. It allows a final modification to the spec before serving.
|
||||
PostProcessSpec func(*spec3.OpenAPI) (*spec3.OpenAPI, error)
|
||||
|
||||
// SecuritySchemes is list of all security schemes for OpenAPI service.
|
||||
SecuritySchemes spec3.SecuritySchemes
|
||||
|
||||
|
||||
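The new PostProcessSpec hook on OpenAPIV3Config gives callers one last chance to mutate the v3 spec before it is served; builder3 invokes it just before returning, as shown earlier in this diff. A hedged sketch of wiring it up follows; the dropped path is invented and the spec3 field names (Paths, Paths.Paths) are assumed to match the current kube-openapi spec3 package.

// Illustrative only; not part of this commit.
package openapiconfig

import (
	"k8s.io/kube-openapi/pkg/common"
	"k8s.io/kube-openapi/pkg/spec3"
)

func newOpenAPIV3Config() *common.OpenAPIV3Config {
	return &common.OpenAPIV3Config{
		PostProcessSpec: func(s *spec3.OpenAPI) (*spec3.OpenAPI, error) {
			// Runs after the spec is fully built; drop a hypothetical
			// internal-only path before serving.
			if s.Paths != nil {
				delete(s.Paths.Paths, "/internal/debug")
			}
			return s, nil
		},
	}
}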
6
vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go
generated
vendored
6
vendor/k8s.io/kube-openapi/pkg/generators/api_linter.go
generated
vendored
@@ -25,8 +25,8 @@ import (
|
||||
|
||||
"k8s.io/kube-openapi/pkg/generators/rules"
|
||||
|
||||
"k8s.io/gengo/generator"
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/generator"
|
||||
"k8s.io/gengo/v2/types"
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
@@ -94,7 +94,7 @@ func newAPIViolationGen() *apiViolationGen {
|
||||
}
|
||||
|
||||
type apiViolationGen struct {
|
||||
generator.DefaultGen
|
||||
generator.GoGenerator
|
||||
|
||||
linter *apiLinter
|
||||
}
|
||||
|
||||
45
vendor/k8s.io/kube-openapi/pkg/generators/config.go
generated
vendored
45
vendor/k8s.io/kube-openapi/pkg/generators/config.go
generated
vendored
@@ -17,16 +17,14 @@ limitations under the License.
|
||||
package generators
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"path"
|
||||
|
||||
"k8s.io/gengo/args"
|
||||
"k8s.io/gengo/generator"
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2"
|
||||
"k8s.io/gengo/v2/generator"
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/types"
|
||||
"k8s.io/klog/v2"
|
||||
|
||||
generatorargs "k8s.io/kube-openapi/cmd/openapi-gen/args"
|
||||
"k8s.io/kube-openapi/cmd/openapi-gen/args"
|
||||
)
|
||||
|
||||
type identityNamer struct{}
|
||||
@@ -51,36 +49,31 @@ func DefaultNameSystem() string {
|
||||
return "sorting_namer"
|
||||
}
|
||||
|
||||
func Packages(context *generator.Context, arguments *args.GeneratorArgs) generator.Packages {
|
||||
boilerplate, err := arguments.LoadGoBoilerplate()
|
||||
func GetTargets(context *generator.Context, args *args.Args) []generator.Target {
|
||||
boilerplate, err := gengo.GoBoilerplate(args.GoHeaderFile, gengo.StdBuildTag, gengo.StdGeneratedBy)
|
||||
if err != nil {
|
||||
klog.Fatalf("Failed loading boilerplate: %v", err)
|
||||
}
|
||||
header := append([]byte(fmt.Sprintf("// +build !%s\n\n", arguments.GeneratedBuildTag)), boilerplate...)
|
||||
header = append(header, []byte(
|
||||
`
|
||||
// This file was autogenerated by openapi-gen. Do not edit it manually!
|
||||
|
||||
`)...)
|
||||
|
||||
reportPath := "-"
|
||||
if customArgs, ok := arguments.CustomArgs.(*generatorargs.CustomArgs); ok {
|
||||
reportPath = customArgs.ReportFilename
|
||||
if args.ReportFilename != "" {
|
||||
reportPath = args.ReportFilename
|
||||
}
|
||||
context.FileTypes[apiViolationFileType] = apiViolationFile{
|
||||
unmangledPath: reportPath,
|
||||
}
|
||||
|
||||
return generator.Packages{
|
||||
&generator.DefaultPackage{
|
||||
PackageName: filepath.Base(arguments.OutputPackagePath),
|
||||
PackagePath: arguments.OutputPackagePath,
|
||||
HeaderText: header,
|
||||
GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) {
|
||||
return []generator.Target{
|
||||
&generator.SimpleTarget{
|
||||
PkgName: path.Base(args.OutputPkg), // `path` vs. `filepath` because packages use '/'
|
||||
PkgPath: args.OutputPkg,
|
||||
PkgDir: args.OutputDir,
|
||||
HeaderComment: boilerplate,
|
||||
GeneratorsFunc: func(c *generator.Context) (generators []generator.Generator) {
|
||||
return []generator.Generator{
|
||||
newOpenAPIGen(
|
||||
arguments.OutputFileBaseName,
|
||||
arguments.OutputPackagePath,
|
||||
args.OutputFile,
|
||||
args.OutputPkg,
|
||||
),
|
||||
newAPIViolationGen(),
|
||||
}
|
||||
|
||||
25
vendor/k8s.io/kube-openapi/pkg/generators/enum.go
generated
vendored
@@ -22,8 +22,9 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/generator"
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2"
|
||||
"k8s.io/gengo/v2/generator"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
const tagEnumType = "enum"
|
||||
@@ -121,7 +122,7 @@ func parseEnums(c *generator.Context) enumMap {
|
||||
Value: *c.ConstValue,
|
||||
Comment: strings.Join(c.CommentLines, " "),
|
||||
}
|
||||
enumTypes[enumType.Name].appendValue(value)
|
||||
enumTypes[enumType.Name].addIfNotPresent(value)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -129,7 +130,21 @@ func parseEnums(c *generator.Context) enumMap {
|
||||
return enumTypes
|
||||
}
|
||||
|
||||
func (et *enumType) appendValue(value *enumValue) {
|
||||
func (et *enumType) addIfNotPresent(value *enumValue) {
|
||||
// If we already have an enum case with the same value, then ignore this new
|
||||
// one. This can happen if an enum aliases one from another package and
|
||||
// re-exports the cases.
|
||||
for i, existing := range et.Values {
|
||||
if existing.Value == value.Value {
|
||||
|
||||
// Take the value of the longer comment (or some other deterministic tie breaker)
|
||||
if len(existing.Comment) < len(value.Comment) || (len(existing.Comment) == len(value.Comment) && existing.Comment > value.Comment) {
|
||||
et.Values[i] = value
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
}
|
||||
et.Values = append(et.Values, value)
|
||||
}
|
||||
|
||||
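The appendValue → addIfNotPresent rename above changes behavior for enums whose cases alias one another: duplicates (by value) are now collapsed, keeping the entry with the longer comment (with a deterministic tie-breaker). A small illustration, assuming a hypothetical single-package simplification of the cross-package re-export described in the code comment:

	// +enum
	type Protocol string

	const (
		// ProtocolTCP selects TCP transport.
		ProtocolTCP Protocol = "TCP"

		// ProtocolDefault is an alias kept for backward compatibility; it has
		// the same underlying value as ProtocolTCP.
		ProtocolDefault Protocol = "TCP"
	)

Previously both constants were appended, so the generated Enum list contained "TCP" twice; with addIfNotPresent only one entry for "TCP" survives.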
@@ -155,7 +170,7 @@ func isEnumType(stringType *types.Type, t *types.Type) bool {
|
||||
}
|
||||
|
||||
func hasEnumTag(t *types.Type) bool {
|
||||
return types.ExtractCommentTags("+", t.CommentLines)[tagEnumType] != nil
|
||||
return gengo.ExtractCommentTags("+", t.CommentLines)[tagEnumType] != nil
|
||||
}
|
||||
|
||||
// whitespaceRegex is the regex for consecutive whitespaces.
|
||||
|
||||
7
vendor/k8s.io/kube-openapi/pkg/generators/extension.go
generated
vendored
@@ -21,8 +21,9 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/examples/set-gen/sets"
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2"
|
||||
"k8s.io/gengo/v2/types"
|
||||
"k8s.io/kube-openapi/pkg/util/sets"
|
||||
)
|
||||
|
||||
const extensionPrefix = "x-kubernetes-"
|
||||
@@ -171,7 +172,7 @@ func parseExtensions(comments []string) ([]extension, []error) {
|
||||
}
|
||||
}
|
||||
// Next, generate extensions from "idlTags" (e.g. +listType)
|
||||
tagValues := types.ExtractCommentTags("+", comments)
|
||||
tagValues := gengo.ExtractCommentTags("+", comments)
|
||||
for _, idlTag := range sortedMapKeys(tagValues) {
|
||||
xAttrs, exists := tagToExtension[idlTag]
|
||||
if !exists {
|
||||
|
||||
613
vendor/k8s.io/kube-openapi/pkg/generators/markers.go
generated
vendored
Normal file
@@ -0,0 +1,613 @@
|
||||
/*
|
||||
Copyright 2022 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package generators
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/v2/types"
|
||||
openapi "k8s.io/kube-openapi/pkg/common"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
)
|
||||
|
||||
type CELTag struct {
|
||||
Rule string `json:"rule,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
MessageExpression string `json:"messageExpression,omitempty"`
|
||||
OptionalOldSelf *bool `json:"optionalOldSelf,omitempty"`
|
||||
Reason string `json:"reason,omitempty"`
|
||||
FieldPath string `json:"fieldPath,omitempty"`
|
||||
}
|
||||
|
||||
func (c *CELTag) Validate() error {
|
||||
if c == nil || *c == (CELTag{}) {
|
||||
return fmt.Errorf("empty CEL tag is not allowed")
|
||||
}
|
||||
|
||||
var errs []error
|
||||
if c.Rule == "" {
|
||||
errs = append(errs, fmt.Errorf("rule cannot be empty"))
|
||||
}
|
||||
if c.Message == "" && c.MessageExpression == "" {
|
||||
errs = append(errs, fmt.Errorf("message or messageExpression must be set"))
|
||||
}
|
||||
if c.Message != "" && c.MessageExpression != "" {
|
||||
errs = append(errs, fmt.Errorf("message and messageExpression cannot be set at the same time"))
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return errors.Join(errs...)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// commentTags represents the parsed comment tags for a given type. These types are then used to generate schema validations.
|
||||
// These only include the newer prefixed tags. The older tags are still supported,
|
||||
// but are not included in this struct. Comment Tags are transformed into a
|
||||
// *spec.Schema, which is then combined with the older marker comments to produce
|
||||
// the generated OpenAPI spec.
|
||||
//
|
||||
// List of tags not included in this struct:
|
||||
//
|
||||
// - +optional
|
||||
// - +default
|
||||
// - +listType
|
||||
// - +listMapKeys
|
||||
// - +mapType
|
||||
type commentTags struct {
|
||||
spec.SchemaProps
|
||||
|
||||
CEL []CELTag `json:"cel,omitempty"`
|
||||
|
||||
// Future markers can all be parsed into this centralized struct...
|
||||
// Optional bool `json:"optional,omitempty"`
|
||||
// Default any `json:"default,omitempty"`
|
||||
}
|
||||
|
||||
// Returns the schema for the given CommentTags instance.
|
||||
// This is the final authoritative schema for the comment tags
|
||||
func (c commentTags) ValidationSchema() (*spec.Schema, error) {
|
||||
res := spec.Schema{
|
||||
SchemaProps: c.SchemaProps,
|
||||
}
|
||||
|
||||
if len(c.CEL) > 0 {
|
||||
// Convert the CELTag to a map[string]interface{} via JSON
|
||||
celTagJSON, err := json.Marshal(c.CEL)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal CEL tag: %w", err)
|
||||
}
|
||||
var celTagMap []interface{}
|
||||
if err := json.Unmarshal(celTagJSON, &celTagMap); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal CEL tag: %w", err)
|
||||
}
|
||||
|
||||
res.VendorExtensible.AddExtension("x-kubernetes-validations", celTagMap)
|
||||
}
|
||||
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
// validates the parameters in a CommentTags instance. Returns any errors encountered.
|
||||
func (c commentTags) Validate() error {
|
||||
|
||||
var err error
|
||||
|
||||
if c.MinLength != nil && *c.MinLength < 0 {
|
||||
err = errors.Join(err, fmt.Errorf("minLength cannot be negative"))
|
||||
}
|
||||
if c.MaxLength != nil && *c.MaxLength < 0 {
|
||||
err = errors.Join(err, fmt.Errorf("maxLength cannot be negative"))
|
||||
}
|
||||
if c.MinItems != nil && *c.MinItems < 0 {
|
||||
err = errors.Join(err, fmt.Errorf("minItems cannot be negative"))
|
||||
}
|
||||
if c.MaxItems != nil && *c.MaxItems < 0 {
|
||||
err = errors.Join(err, fmt.Errorf("maxItems cannot be negative"))
|
||||
}
|
||||
if c.MinProperties != nil && *c.MinProperties < 0 {
|
||||
err = errors.Join(err, fmt.Errorf("minProperties cannot be negative"))
|
||||
}
|
||||
if c.MaxProperties != nil && *c.MaxProperties < 0 {
|
||||
err = errors.Join(err, fmt.Errorf("maxProperties cannot be negative"))
|
||||
}
|
||||
if c.Minimum != nil && c.Maximum != nil && *c.Minimum > *c.Maximum {
|
||||
err = errors.Join(err, fmt.Errorf("minimum %f is greater than maximum %f", *c.Minimum, *c.Maximum))
|
||||
}
|
||||
if (c.ExclusiveMinimum || c.ExclusiveMaximum) && c.Minimum != nil && c.Maximum != nil && *c.Minimum == *c.Maximum {
|
||||
err = errors.Join(err, fmt.Errorf("exclusiveMinimum/Maximum cannot be set when minimum == maximum"))
|
||||
}
|
||||
if c.MinLength != nil && c.MaxLength != nil && *c.MinLength > *c.MaxLength {
|
||||
err = errors.Join(err, fmt.Errorf("minLength %d is greater than maxLength %d", *c.MinLength, *c.MaxLength))
|
||||
}
|
||||
if c.MinItems != nil && c.MaxItems != nil && *c.MinItems > *c.MaxItems {
|
||||
err = errors.Join(err, fmt.Errorf("minItems %d is greater than maxItems %d", *c.MinItems, *c.MaxItems))
|
||||
}
|
||||
if c.MinProperties != nil && c.MaxProperties != nil && *c.MinProperties > *c.MaxProperties {
|
||||
err = errors.Join(err, fmt.Errorf("minProperties %d is greater than maxProperties %d", *c.MinProperties, *c.MaxProperties))
|
||||
}
|
||||
if c.Pattern != "" {
|
||||
_, e := regexp.Compile(c.Pattern)
|
||||
if e != nil {
|
||||
err = errors.Join(err, fmt.Errorf("invalid pattern %q: %v", c.Pattern, e))
|
||||
}
|
||||
}
|
||||
if c.MultipleOf != nil && *c.MultipleOf == 0 {
|
||||
err = errors.Join(err, fmt.Errorf("multipleOf cannot be 0"))
|
||||
}
|
||||
|
||||
for i, celTag := range c.CEL {
|
||||
celError := celTag.Validate()
|
||||
if celError == nil {
|
||||
continue
|
||||
}
|
||||
err = errors.Join(err, fmt.Errorf("invalid CEL tag at index %d: %w", i, celError))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Performs type-specific validation for commentTags parameters. Accepts a Type instance and returns any errors encountered during validation.
|
||||
func (c commentTags) ValidateType(t *types.Type) error {
|
||||
var err error
|
||||
|
||||
resolvedType := resolveAliasAndPtrType(t)
|
||||
typeString, _ := openapi.OpenAPITypeFormat(resolvedType.String()) // will be empty for complicated types
|
||||
|
||||
// Structs and interfaces may dynamically be any type, so we can't validate them
|
||||
// easily. We may be able to if we check that they don't implement all the
|
||||
// override functions, but for now we just skip them.
|
||||
if resolvedType.Kind == types.Interface || resolvedType.Kind == types.Struct {
|
||||
return nil
|
||||
}
|
||||
|
||||
isArray := resolvedType.Kind == types.Slice || resolvedType.Kind == types.Array
|
||||
isMap := resolvedType.Kind == types.Map
|
||||
isString := typeString == "string"
|
||||
isInt := typeString == "integer"
|
||||
isFloat := typeString == "number"
|
||||
|
||||
if c.MaxItems != nil && !isArray {
|
||||
err = errors.Join(err, fmt.Errorf("maxItems can only be used on array types"))
|
||||
}
|
||||
if c.MinItems != nil && !isArray {
|
||||
err = errors.Join(err, fmt.Errorf("minItems can only be used on array types"))
|
||||
}
|
||||
if c.UniqueItems && !isArray {
|
||||
err = errors.Join(err, fmt.Errorf("uniqueItems can only be used on array types"))
|
||||
}
|
||||
if c.MaxProperties != nil && !isMap {
|
||||
err = errors.Join(err, fmt.Errorf("maxProperties can only be used on map types"))
|
||||
}
|
||||
if c.MinProperties != nil && !isMap {
|
||||
err = errors.Join(err, fmt.Errorf("minProperties can only be used on map types"))
|
||||
}
|
||||
if c.MinLength != nil && !isString {
|
||||
err = errors.Join(err, fmt.Errorf("minLength can only be used on string types"))
|
||||
}
|
||||
if c.MaxLength != nil && !isString {
|
||||
err = errors.Join(err, fmt.Errorf("maxLength can only be used on string types"))
|
||||
}
|
||||
if c.Pattern != "" && !isString {
|
||||
err = errors.Join(err, fmt.Errorf("pattern can only be used on string types"))
|
||||
}
|
||||
if c.Minimum != nil && !isInt && !isFloat {
|
||||
err = errors.Join(err, fmt.Errorf("minimum can only be used on numeric types"))
|
||||
}
|
||||
if c.Maximum != nil && !isInt && !isFloat {
|
||||
err = errors.Join(err, fmt.Errorf("maximum can only be used on numeric types"))
|
||||
}
|
||||
if c.MultipleOf != nil && !isInt && !isFloat {
|
||||
err = errors.Join(err, fmt.Errorf("multipleOf can only be used on numeric types"))
|
||||
}
|
||||
if c.ExclusiveMinimum && !isInt && !isFloat {
|
||||
err = errors.Join(err, fmt.Errorf("exclusiveMinimum can only be used on numeric types"))
|
||||
}
|
||||
if c.ExclusiveMaximum && !isInt && !isFloat {
|
||||
err = errors.Join(err, fmt.Errorf("exclusiveMaximum can only be used on numeric types"))
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Parses the given comments into a commentTags type. Validates the parsed comment tags, and returns the result.
// Accepts an optional type to validate against, and a prefix to filter out markers not related to validation.
// Returns any errors encountered while parsing or validating the comment tags.
|
||||
func ParseCommentTags(t *types.Type, comments []string, prefix string) (*spec.Schema, error) {
|
||||
|
||||
markers, err := parseMarkers(comments, prefix)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse marker comments: %w", err)
|
||||
}
|
||||
nested, err := nestMarkers(markers)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid marker comments: %w", err)
|
||||
}
|
||||
|
||||
// Parse the map into a CommentTags type by marshalling and unmarshalling
|
||||
// as JSON in lieu of an unstructured converter.
|
||||
out, err := json.Marshal(nested)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal marker comments: %w", err)
|
||||
}
|
||||
|
||||
var commentTags commentTags
|
||||
if err = json.Unmarshal(out, &commentTags); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal marker comments: %w", err)
|
||||
}
|
||||
|
||||
// Validate the parsed comment tags
|
||||
validationErrors := commentTags.Validate()
|
||||
|
||||
if t != nil {
|
||||
validationErrors = errors.Join(validationErrors, commentTags.ValidateType(t))
|
||||
}
|
||||
|
||||
if validationErrors != nil {
|
||||
return nil, fmt.Errorf("invalid marker comments: %w", validationErrors)
|
||||
}
|
||||
|
||||
return commentTags.ValidationSchema()
|
||||
}
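To make the new marker surface concrete, here is a hedged example of the kind of type ParseCommentTags is aimed at (field names and values are hypothetical; the prefix is the markerPrefix constant "+k8s:validation:" defined in openapi.go later in this diff):

	type ExampleSpec struct {
		// +k8s:validation:minimum=1
		// +k8s:validation:maximum=10
		Replicas int `json:"replicas"`

		// +k8s:validation:maxLength=63
		// +k8s:validation:pattern="^[a-z0-9-]*$"
		Name string `json:"name,omitempty"`
	}

Each value is parsed as JSON, merged into commentTags.SchemaProps, checked by Validate (e.g. minimum must not exceed maximum) and ValidateType (e.g. maxLength only on string fields), and finally returned as a *spec.Schema.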
|
||||
|
||||
var (
|
||||
allowedKeyCharacterSet = `[:_a-zA-Z0-9\[\]\-]`
|
||||
valueEmpty = regexp.MustCompile(fmt.Sprintf(`^(%s*)$`, allowedKeyCharacterSet))
|
||||
valueAssign = regexp.MustCompile(fmt.Sprintf(`^(%s*)=(.*)$`, allowedKeyCharacterSet))
|
||||
valueRawString = regexp.MustCompile(fmt.Sprintf(`^(%s*)>(.*)$`, allowedKeyCharacterSet))
|
||||
)
|
||||
|
||||
// extractCommentTags parses comments for lines of the form:
|
||||
//
|
||||
// 'marker' + "key=value"
|
||||
//
|
||||
// or to specify truthy boolean keys:
|
||||
//
|
||||
// 'marker' + "key"
|
||||
//
|
||||
// Values are optional; "" is the default. Returns a map with an entry for
// each key and a value.
|
||||
//
|
||||
// Similar to the version from gengo, but this version only allows one
|
||||
// value per key (preferring explicit array indices), supports raw strings
|
||||
// with concatenation, and limits the usable characters allowed in a key
|
||||
// (for simpler parsing).
|
||||
//
|
||||
// Assignments and empty values have the same syntax as from gengo. Raw strings
|
||||
// have the syntax:
|
||||
//
|
||||
// 'marker' + "key>value"
|
||||
// 'marker' + "key>value"
|
||||
//
|
||||
// Successive usages of the same raw string key results in concatenating each
|
||||
// line with `\n` in between. It is an error to use `=` to assign to a previously
|
||||
// assigned key
|
||||
// (in contrast to types.ExtractCommentTags which allows array-typed
|
||||
// values to be specified using `=`).
|
||||
func extractCommentTags(marker string, lines []string) (map[string]string, error) {
|
||||
out := map[string]string{}
|
||||
|
||||
// Used to track the line immediately prior to the one being iterated.
|
||||
// If there was an invalid or ignored line, these values get reset.
|
||||
lastKey := ""
|
||||
lastIndex := -1
|
||||
lastArrayKey := ""
|
||||
|
||||
var lintErrors []error
|
||||
|
||||
for _, line := range lines {
|
||||
line = strings.Trim(line, " ")
|
||||
|
||||
// Track the current value of the last vars to use in this loop iteration
|
||||
// before they are reset for the next iteration.
|
||||
previousKey := lastKey
|
||||
previousArrayKey := lastArrayKey
|
||||
previousIndex := lastIndex
|
||||
|
||||
// Make sure last vars gets reset if we `continue`
|
||||
lastIndex = -1
|
||||
lastArrayKey = ""
|
||||
lastKey = ""
|
||||
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
} else if !strings.HasPrefix(line, marker) {
|
||||
continue
|
||||
}
|
||||
|
||||
line = strings.TrimPrefix(line, marker)
|
||||
|
||||
key := ""
|
||||
value := ""
|
||||
|
||||
if matches := valueAssign.FindStringSubmatch(line); matches != nil {
|
||||
key = matches[1]
|
||||
value = matches[2]
|
||||
|
||||
// If key exists, throw error.
|
||||
// Some of the old kube open-api gen marker comments like
|
||||
// `+listMapKeys` allowed a list to be specified by writing key=value
|
||||
// multiple times.
|
||||
//
|
||||
// This is no longer supported for the prefixed marker comments.
|
||||
// This is to prevent confusion with the new array syntax which
|
||||
// supports lists of objects.
|
||||
//
|
||||
// The old marker comments like +listMapKeys will remain functional,
|
||||
// but new markers will not support it.
|
||||
if _, ok := out[key]; ok {
|
||||
return nil, fmt.Errorf("cannot have multiple values for key '%v'", key)
|
||||
}
|
||||
|
||||
} else if matches := valueEmpty.FindStringSubmatch(line); matches != nil {
|
||||
key = matches[1]
|
||||
value = ""
|
||||
|
||||
} else if matches := valueRawString.FindStringSubmatch(line); matches != nil {
|
||||
toAdd := strings.Trim(string(matches[2]), " ")
|
||||
|
||||
key = matches[1]
|
||||
|
||||
// First usage as a raw string.
|
||||
if existing, exists := out[key]; !exists {
|
||||
|
||||
// Encode the raw string as JSON to ensure that it is properly escaped.
|
||||
valueBytes, err := json.Marshal(toAdd)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid value for key %v: %w", key, err)
|
||||
}
|
||||
|
||||
value = string(valueBytes)
|
||||
} else if key != previousKey {
|
||||
// Successive usages of the same key of a raw string must be
|
||||
// consecutive
|
||||
return nil, fmt.Errorf("concatenations to key '%s' must be consecutive with its assignment", key)
|
||||
} else {
|
||||
// If it is a consecutive repeat usage, concatenate to the
|
||||
// existing value.
|
||||
//
|
||||
// Decode JSON string, append to it, re-encode JSON string.
|
||||
// Kinda janky but this is a code-generator...
|
||||
var unmarshalled string
|
||||
if err := json.Unmarshal([]byte(existing), &unmarshalled); err != nil {
|
||||
return nil, fmt.Errorf("invalid value for key %v: %w", key, err)
|
||||
} else {
|
||||
unmarshalled += "\n" + toAdd
|
||||
valueBytes, err := json.Marshal(unmarshalled)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid value for key %v: %w", key, err)
|
||||
}
|
||||
|
||||
value = string(valueBytes)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Comment has the correct prefix, but incorrect syntax, so it is an
|
||||
// error
|
||||
return nil, fmt.Errorf("invalid marker comment does not match expected `+key=<json formatted value>` pattern: %v", line)
|
||||
}
|
||||
|
||||
out[key] = value
|
||||
lastKey = key
|
||||
|
||||
// Lint the array subscript for common mistakes. This only lints the last
|
||||
// array index used (since we do not have a need for nested arrays yet
|
||||
// in markers)
|
||||
if arrayPath, index, hasSubscript, err := extractArraySubscript(key); hasSubscript {
|
||||
// If index is non-zero, check that the previous line was for the same
|
||||
// key and either the same or previous index
|
||||
if err != nil {
|
||||
lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: expected integer index in key '%v'", line, key))
|
||||
} else if previousArrayKey != arrayPath && index != 0 {
|
||||
lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: non-consecutive index %v for key '%v'", line, index, arrayPath))
|
||||
} else if index != previousIndex+1 && index != previousIndex {
|
||||
lintErrors = append(lintErrors, fmt.Errorf("error parsing %v: non-consecutive index %v for key '%v'", line, index, arrayPath))
|
||||
}
|
||||
|
||||
lastIndex = index
|
||||
lastArrayKey = arrayPath
|
||||
}
|
||||
}
|
||||
|
||||
if len(lintErrors) > 0 {
|
||||
return nil, errors.Join(lintErrors...)
|
||||
}
|
||||
|
||||
return out, nil
|
||||
}
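Raw-string (`>`) assignments and array subscripts combine to express CEL rules, which is what the CELTag type at the top of this file consumes. A hedged sketch (type and field names are placeholders; the key syntax follows the doc comment above):

	// +k8s:validation:cel[0]:rule> self.replicas <= self.maxReplicas
	// +k8s:validation:cel[0]:message> replicas may not exceed maxReplicas
	// +k8s:validation:cel[1]:rule> self.replicas >= self.minReplicas
	// +k8s:validation:cel[1]:messageExpression> 'replicas must be at least ' + string(self.minReplicas)
	type ReplicaSpec struct {
		Replicas    int `json:"replicas"`
		MinReplicas int `json:"minReplicas"`
		MaxReplicas int `json:"maxReplicas"`
	}

The cel[0]/cel[1] subscripts build a list of objects, consecutive rule/message lines fill each object's fields, and ValidationSchema then emits the list as the x-kubernetes-validations extension.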
|
||||
|
||||
// Extracts and parses the given marker comments into a map of key -> value.
|
||||
// Accepts a prefix to filter out markers not related to validation.
|
||||
// The prefix is removed from the key in the returned map.
|
||||
// Empty keys and invalid values will return errors, refs are currently unsupported and will be skipped.
|
||||
func parseMarkers(markerComments []string, prefix string) (map[string]any, error) {
|
||||
markers, err := extractCommentTags(prefix, markerComments)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Parse the values as JSON
|
||||
result := map[string]any{}
|
||||
for key, value := range markers {
|
||||
var unmarshalled interface{}
|
||||
|
||||
if len(key) == 0 {
|
||||
return nil, fmt.Errorf("cannot have empty key for marker comment")
|
||||
} else if _, ok := parseSymbolReference(value, ""); ok {
|
||||
// Skip ref markers
|
||||
continue
|
||||
} else if len(value) == 0 {
|
||||
// Empty value means key is implicitly a bool
|
||||
result[key] = true
|
||||
} else if err := json.Unmarshal([]byte(value), &unmarshalled); err != nil {
|
||||
// Not valid JSON, throw error
|
||||
return nil, fmt.Errorf("failed to parse value for key %v as JSON: %w", key, err)
|
||||
} else {
|
||||
// It is valid JSON; use it as a JSON value
|
||||
result[key] = unmarshalled
|
||||
}
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// Converts a map of:
|
||||
//
|
||||
// "a:b:c": 1
|
||||
// "a:b:d": 2
|
||||
// "a:e": 3
|
||||
// "f": 4
|
||||
//
|
||||
// Into:
|
||||
//
|
||||
// map[string]any{
|
||||
// "a": map[string]any{
|
||||
// "b": map[string]any{
|
||||
// "c": 1,
|
||||
// "d": 2,
|
||||
// },
|
||||
// "e": 3,
|
||||
// },
|
||||
// "f": 4,
|
||||
// }
|
||||
//
|
||||
// Returns a list of joined errors for any invalid keys. See putNestedValue for more details.
|
||||
func nestMarkers(markers map[string]any) (map[string]any, error) {
|
||||
nested := make(map[string]any)
|
||||
var errs []error
|
||||
for key, value := range markers {
|
||||
var err error
|
||||
keys := strings.Split(key, ":")
|
||||
|
||||
if err = putNestedValue(nested, keys, value); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
|
||||
if len(errs) > 0 {
|
||||
return nil, errors.Join(errs...)
|
||||
}
|
||||
|
||||
return nested, nil
|
||||
}
|
||||
|
||||
// Recursively puts a value into the given keypath, creating intermediate maps
|
||||
// and slices as needed. If a key is of the form `foo[bar]`, then bar will be
|
||||
// treated as an index into the array foo. If bar is not a valid integer, putNestedValue returns an error.
|
||||
func putNestedValue(m map[string]any, k []string, v any) error {
|
||||
if len(k) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
key := k[0]
|
||||
rest := k[1:]
|
||||
|
||||
// Array case
|
||||
if arrayKeyWithoutSubscript, index, hasSubscript, err := extractArraySubscript(key); err != nil {
|
||||
return fmt.Errorf("error parsing subscript for key %v: %w", key, err)
|
||||
} else if hasSubscript {
|
||||
key = arrayKeyWithoutSubscript
|
||||
var arrayDestination []any
|
||||
if existing, ok := m[key]; !ok {
|
||||
arrayDestination = make([]any, index+1)
|
||||
} else if existing, ok := existing.([]any); !ok {
|
||||
// Error case. Existing isn't of correct type. Can happen if
|
||||
// someone is subscripting a field that was previously not an array
|
||||
return fmt.Errorf("expected []any at key %v, got %T", key, existing)
|
||||
} else if index >= len(existing) {
|
||||
// Ensure array is big enough
|
||||
arrayDestination = append(existing, make([]any, index-len(existing)+1)...)
|
||||
} else {
|
||||
arrayDestination = existing
|
||||
}
|
||||
|
||||
m[key] = arrayDestination
|
||||
if arrayDestination[index] == nil {
|
||||
// Doesn't exist case, create the destination.
|
||||
// Assumes the destination is a map for now. Theoretically could be
|
||||
// extended to support arrays of arrays, but that's not needed yet.
|
||||
destination := make(map[string]any)
|
||||
arrayDestination[index] = destination
|
||||
if err = putNestedValue(destination, rest, v); err != nil {
|
||||
return err
|
||||
}
|
||||
} else if dst, ok := arrayDestination[index].(map[string]any); ok {
|
||||
// Already exists case, correct type
|
||||
if err = putNestedValue(dst, rest, v); err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
// Already exists, incorrect type. Error
|
||||
// This shouldn't be possible.
|
||||
return fmt.Errorf("expected map at %v[%v], got %T", key, index, arrayDestination[index])
|
||||
}
|
||||
|
||||
return nil
|
||||
} else if len(rest) == 0 {
|
||||
// Base case. Single key. Just set into destination
|
||||
m[key] = v
|
||||
return nil
|
||||
}
|
||||
|
||||
if existing, ok := m[key]; !ok {
|
||||
destination := make(map[string]any)
|
||||
m[key] = destination
|
||||
return putNestedValue(destination, rest, v)
|
||||
} else if destination, ok := existing.(map[string]any); ok {
|
||||
return putNestedValue(destination, rest, v)
|
||||
} else {
|
||||
// Error case. Existing isn't of correct type. Can happen if prior comment
|
||||
// referred to value as an error
|
||||
return fmt.Errorf("expected map[string]any at key %v, got %T", key, existing)
|
||||
}
|
||||
}
|
||||
|
||||
// extractArraySubscript extracts the left array subscript from a key of
|
||||
// the form `foo[bar][baz]` -> "bar".
|
||||
// Returns the key without the subscript, the index, and a bool indicating if
|
||||
// the key had a subscript.
|
||||
// If the key has a subscript, but the subscript is not a valid integer, returns an error.
|
||||
//
|
||||
// This can be adapted to support multidimensional subscripts probably fairly
|
||||
// easily by returning a list of ints
|
||||
func extractArraySubscript(str string) (string, int, bool, error) {
|
||||
subscriptIdx := strings.Index(str, "[")
|
||||
if subscriptIdx == -1 {
|
||||
return "", -1, false, nil
|
||||
}
|
||||
|
||||
subscript := strings.Split(str[subscriptIdx+1:], "]")[0]
|
||||
if len(subscript) == 0 {
|
||||
return "", -1, false, fmt.Errorf("empty subscript not allowed")
|
||||
}
|
||||
|
||||
index, err := strconv.Atoi(subscript)
|
||||
if err != nil {
|
||||
return "", -1, false, fmt.Errorf("expected integer index in key %v", str)
|
||||
} else if index < 0 {
|
||||
return "", -1, false, fmt.Errorf("subscript '%v' is invalid. index must be positive", subscript)
|
||||
}
|
||||
|
||||
return str[:subscriptIdx], index, true, nil
|
||||
}
|
||||
269
vendor/k8s.io/kube-openapi/pkg/generators/openapi.go
generated
vendored
@@ -21,23 +21,27 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"path"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
defaultergen "k8s.io/gengo/examples/defaulter-gen/generators"
|
||||
"k8s.io/gengo/generator"
|
||||
"k8s.io/gengo/namer"
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2"
|
||||
"k8s.io/gengo/v2/generator"
|
||||
"k8s.io/gengo/v2/namer"
|
||||
"k8s.io/gengo/v2/types"
|
||||
openapi "k8s.io/kube-openapi/pkg/common"
|
||||
"k8s.io/kube-openapi/pkg/validation/spec"
|
||||
|
||||
"k8s.io/klog/v2"
|
||||
)
|
||||
|
||||
// This is the comment tag that carries parameters for open API generation.
|
||||
const tagName = "k8s:openapi-gen"
|
||||
const markerPrefix = "+k8s:validation:"
|
||||
const tagOptional = "optional"
|
||||
const tagRequired = "required"
|
||||
const tagDefault = "default"
|
||||
|
||||
// Known values for the tag.
|
||||
@@ -54,11 +58,11 @@ var tempPatchTags = [...]string{
|
||||
}
|
||||
|
||||
func getOpenAPITagValue(comments []string) []string {
|
||||
return types.ExtractCommentTags("+", comments)[tagName]
|
||||
return gengo.ExtractCommentTags("+", comments)[tagName]
|
||||
}
|
||||
|
||||
func getSingleTagsValue(comments []string, tag string) (string, error) {
|
||||
tags, ok := types.ExtractCommentTags("+", comments)[tag]
|
||||
tags, ok := gengo.ExtractCommentTags("+", comments)[tag]
|
||||
if !ok || len(tags) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
@@ -78,14 +82,25 @@ func hasOpenAPITagValue(comments []string, value string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// hasOptionalTag returns true if the member has +optional in its comments or
|
||||
// omitempty in its json tags.
|
||||
func hasOptionalTag(m *types.Member) bool {
|
||||
hasOptionalCommentTag := types.ExtractCommentTags(
|
||||
// isOptional returns an error if the member has both +optional and +required in
|
||||
// its comments. If +optional is present it returns true. If +required is present
|
||||
// it returns false. Otherwise, it returns true if `omitempty` JSON tag is present
|
||||
func isOptional(m *types.Member) (bool, error) {
|
||||
hasOptionalCommentTag := gengo.ExtractCommentTags(
|
||||
"+", m.CommentLines)[tagOptional] != nil
|
||||
hasOptionalJsonTag := strings.Contains(
|
||||
reflect.StructTag(m.Tags).Get("json"), "omitempty")
|
||||
return hasOptionalCommentTag || hasOptionalJsonTag
|
||||
hasRequiredCommentTag := gengo.ExtractCommentTags(
|
||||
"+", m.CommentLines)[tagRequired] != nil
|
||||
if hasOptionalCommentTag && hasRequiredCommentTag {
|
||||
return false, fmt.Errorf("member %s cannot be both optional and required", m.Name)
|
||||
} else if hasRequiredCommentTag {
|
||||
return false, nil
|
||||
} else if hasOptionalCommentTag {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// If neither +optional nor +required is present in the comments,
|
||||
// infer optional from the json tags.
|
||||
return strings.Contains(reflect.StructTag(m.Tags).Get("json"), "omitempty"), nil
|
||||
}
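In practice the new +required handling means the three signals interact as follows (a hypothetical struct; the trailing comments state what isOptional reports for each member):

	type Widget struct {
		// +optional
		A *string `json:"a,omitempty"` // optional

		// +required
		B string `json:"b,omitempty"` // required: +required overrides the omitempty inference

		C string `json:"c,omitempty"` // optional, inferred from omitempty

		// +optional
		// +required
		D string `json:"d"` // error: cannot be both optional and required
	}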
|
||||
|
||||
func apiTypeFilterFunc(c *generator.Context, t *types.Type) bool {
|
||||
@@ -110,16 +125,16 @@ const (
|
||||
|
||||
// openApiGen produces a file with auto-generated OpenAPI functions.
|
||||
type openAPIGen struct {
|
||||
generator.DefaultGen
|
||||
generator.GoGenerator
|
||||
// targetPackage is the package that will contain the GetOpenAPIDefinitions function, which returns all OpenAPI definitions.
|
||||
targetPackage string
|
||||
imports namer.ImportTracker
|
||||
}
|
||||
|
||||
func newOpenAPIGen(sanitizedName string, targetPackage string) generator.Generator {
|
||||
func newOpenAPIGen(outputFilename string, targetPackage string) generator.Generator {
|
||||
return &openAPIGen{
|
||||
DefaultGen: generator.DefaultGen{
|
||||
OptionalName: sanitizedName,
|
||||
GoGenerator: generator.GoGenerator{
|
||||
OutputFilename: outputFilename,
|
||||
},
|
||||
imports: generator.NewImportTrackerForPackage(targetPackage),
|
||||
targetPackage: targetPackage,
|
||||
@@ -141,16 +156,6 @@ func (g *openAPIGen) Namers(c *generator.Context) namer.NameSystems {
|
||||
}
|
||||
}
|
||||
|
||||
func (g *openAPIGen) isOtherPackage(pkg string) bool {
|
||||
if pkg == g.targetPackage {
|
||||
return false
|
||||
}
|
||||
if strings.HasSuffix(pkg, "\""+g.targetPackage+"\"") {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (g *openAPIGen) Imports(c *generator.Context) []string {
|
||||
importLines := []string{}
|
||||
for _, singleImport := range g.imports.ImportLines() {
|
||||
@@ -292,7 +297,8 @@ func hasOpenAPIV3OneOfMethod(t *types.Type) bool {
|
||||
|
||||
// typeShortName returns short package name (e.g. the name x appears in package x definition) dot type name.
|
||||
func typeShortName(t *types.Type) string {
|
||||
return filepath.Base(t.Name.Package) + "." + t.Name.Name
|
||||
// `path` vs. `filepath` because packages use '/'
|
||||
return path.Base(t.Name.Package) + "." + t.Name.Name
|
||||
}
|
||||
|
||||
func (g openAPITypeWriter) generateMembers(t *types.Type, required []string) ([]string, error) {
|
||||
@@ -315,7 +321,10 @@ func (g openAPITypeWriter) generateMembers(t *types.Type, required []string) ([]
|
||||
if name == "" {
|
||||
continue
|
||||
}
|
||||
if !hasOptionalTag(&m) {
|
||||
if isOptional, err := isOptional(&m); err != nil {
|
||||
klog.Errorf("Error when generating: %v, %v\n", name, m)
|
||||
return required, err
|
||||
} else if !isOptional {
|
||||
required = append(required, name)
|
||||
}
|
||||
if err = g.generateProperty(&m, t); err != nil {
|
||||
@@ -353,10 +362,76 @@ func (g openAPITypeWriter) generateCall(t *types.Type) error {
|
||||
return g.Error()
|
||||
}
|
||||
|
||||
func (g openAPITypeWriter) generateValueValidations(vs *spec.SchemaProps) error {
|
||||
|
||||
if vs == nil {
|
||||
return nil
|
||||
}
|
||||
args := generator.Args{
|
||||
"ptrTo": &types.Type{
|
||||
Name: types.Name{
|
||||
Package: "k8s.io/utils/ptr",
|
||||
Name: "To",
|
||||
}},
|
||||
"spec": vs,
|
||||
}
|
||||
if vs.Minimum != nil {
|
||||
g.Do("Minimum: $.ptrTo|raw$[float64]($.spec.Minimum$),\n", args)
|
||||
}
|
||||
if vs.Maximum != nil {
|
||||
g.Do("Maximum: $.ptrTo|raw$[float64]($.spec.Maximum$),\n", args)
|
||||
}
|
||||
if vs.ExclusiveMinimum {
|
||||
g.Do("ExclusiveMinimum: true,\n", args)
|
||||
}
|
||||
if vs.ExclusiveMaximum {
|
||||
g.Do("ExclusiveMaximum: true,\n", args)
|
||||
}
|
||||
if vs.MinLength != nil {
|
||||
g.Do("MinLength: $.ptrTo|raw$[int64]($.spec.MinLength$),\n", args)
|
||||
}
|
||||
if vs.MaxLength != nil {
|
||||
g.Do("MaxLength: $.ptrTo|raw$[int64]($.spec.MaxLength$),\n", args)
|
||||
}
|
||||
|
||||
if vs.MinProperties != nil {
|
||||
g.Do("MinProperties: $.ptrTo|raw$[int64]($.spec.MinProperties$),\n", args)
|
||||
}
|
||||
if vs.MaxProperties != nil {
|
||||
g.Do("MaxProperties: $.ptrTo|raw$[int64]($.spec.MaxProperties$),\n", args)
|
||||
}
|
||||
if len(vs.Pattern) > 0 {
|
||||
p, err := json.Marshal(vs.Pattern)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("Pattern: $.$,\n", string(p))
|
||||
}
|
||||
if vs.MultipleOf != nil {
|
||||
g.Do("MultipleOf: $.ptrTo|raw$[float64]($.spec.MultipleOf$),\n", args)
|
||||
}
|
||||
if vs.MinItems != nil {
|
||||
g.Do("MinItems: $.ptrTo|raw$[int64]($.spec.MinItems$),\n", args)
|
||||
}
|
||||
if vs.MaxItems != nil {
|
||||
g.Do("MaxItems: $.ptrTo|raw$[int64]($.spec.MaxItems$),\n", args)
|
||||
}
|
||||
if vs.UniqueItems {
|
||||
g.Do("UniqueItems: true,\n", nil)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
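generateValueValidations is what turns the parsed markers into literal fields of the emitted spec.SchemaProps, using k8s.io/utils/ptr for the pointer-typed limits. Written out by hand, the shape of the output for a field annotated with minimum/maximum markers would look roughly like this (hypothetical values, not copied from a real generation run):

package example

import (
	"k8s.io/kube-openapi/pkg/validation/spec"
	"k8s.io/utils/ptr"
)

// replicasSchema mirrors the shape of the generated code for an int field
// annotated with +k8s:validation:minimum=1 and +k8s:validation:maximum=10.
var replicasSchema = spec.Schema{
	SchemaProps: spec.SchemaProps{
		Type:    []string{"integer"},
		Format:  "int32",
		Minimum: ptr.To[float64](1),
		Maximum: ptr.To[float64](10),
	},
}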
|
||||
|
||||
func (g openAPITypeWriter) generate(t *types.Type) error {
|
||||
// Only generate for struct type and ignore the rest
|
||||
switch t.Kind {
|
||||
case types.Struct:
|
||||
validationSchema, err := ParseCommentTags(t, t.CommentLines, markerPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
hasV2Definition := hasOpenAPIDefinitionMethod(t)
|
||||
hasV2DefinitionTypeAndFormat := hasOpenAPIDefinitionMethods(t)
|
||||
hasV3OneOfTypes := hasOpenAPIV3OneOfMethod(t)
|
||||
@@ -376,10 +451,17 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
|
||||
"SchemaProps: spec.SchemaProps{\n", args)
|
||||
g.generateDescription(t.CommentLines)
|
||||
g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+
|
||||
"Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+
|
||||
"},\n"+
|
||||
"},\n"+
|
||||
"})\n}\n\n", args)
|
||||
"Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args)
|
||||
err = g.generateValueValidations(&validationSchema.SchemaProps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
g.Do("})\n}\n\n", args)
|
||||
return nil
|
||||
case hasV2DefinitionTypeAndFormat && hasV3OneOfTypes:
|
||||
// generate v3 def.
|
||||
@@ -388,20 +470,34 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
|
||||
"SchemaProps: spec.SchemaProps{\n", args)
|
||||
g.generateDescription(t.CommentLines)
|
||||
g.Do("OneOf:common.GenerateOpenAPIV3OneOfSchema($.type|raw${}.OpenAPIV3OneOfTypes()),\n"+
|
||||
"Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+
|
||||
"},\n"+
|
||||
"},\n"+
|
||||
"},", args)
|
||||
"Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args)
|
||||
err = g.generateValueValidations(&validationSchema.SchemaProps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
g.Do("},", args)
|
||||
// generate v2 def.
|
||||
g.Do("$.OpenAPIDefinition|raw${\n"+
|
||||
"Schema: spec.Schema{\n"+
|
||||
"SchemaProps: spec.SchemaProps{\n", args)
|
||||
g.generateDescription(t.CommentLines)
|
||||
g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+
|
||||
"Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+
|
||||
"},\n"+
|
||||
"},\n"+
|
||||
"})\n}\n\n", args)
|
||||
"Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args)
|
||||
err = g.generateValueValidations(&validationSchema.SchemaProps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
g.Do("})\n}\n\n", args)
|
||||
return nil
|
||||
case hasV2DefinitionTypeAndFormat:
|
||||
g.Do("return $.OpenAPIDefinition|raw${\n"+
|
||||
@@ -409,18 +505,30 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
|
||||
"SchemaProps: spec.SchemaProps{\n", args)
|
||||
g.generateDescription(t.CommentLines)
|
||||
g.Do("Type:$.type|raw${}.OpenAPISchemaType(),\n"+
|
||||
"Format:$.type|raw${}.OpenAPISchemaFormat(),\n"+
|
||||
"},\n"+
|
||||
"},\n"+
|
||||
"}\n}\n\n", args)
|
||||
"Format:$.type|raw${}.OpenAPISchemaFormat(),\n", args)
|
||||
err = g.generateValueValidations(&validationSchema.SchemaProps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
g.Do("}\n}\n\n", args)
|
||||
return nil
|
||||
case hasV3OneOfTypes:
|
||||
// having v3 oneOf types without custom v2 type or format does not make sense.
|
||||
return fmt.Errorf("type %q has v3 one of types but not v2 type or format", t.Name)
|
||||
}
|
||||
|
||||
g.Do("return $.OpenAPIDefinition|raw${\nSchema: spec.Schema{\nSchemaProps: spec.SchemaProps{\n", args)
|
||||
g.generateDescription(t.CommentLines)
|
||||
g.Do("Type: []string{\"object\"},\n", nil)
|
||||
err = g.generateValueValidations(&validationSchema.SchemaProps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// write members into a temporary buffer, in order to postpone writing out the Properties field. We only do
|
||||
// that if it is not empty.
|
||||
@@ -441,7 +549,7 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
|
||||
g.Do("Required: []string{\"$.$\"},\n", strings.Join(required, "\",\""))
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
if err := g.generateStructExtensions(t); err != nil {
|
||||
if err := g.generateStructExtensions(t, validationSchema.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
@@ -474,7 +582,7 @@ func (g openAPITypeWriter) generate(t *types.Type) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error {
|
||||
func (g openAPITypeWriter) generateStructExtensions(t *types.Type, otherExtensions map[string]interface{}) error {
|
||||
extensions, errors := parseExtensions(t.CommentLines)
|
||||
// Initially, we will only log struct extension errors.
|
||||
if len(errors) > 0 {
|
||||
@@ -490,11 +598,11 @@ func (g openAPITypeWriter) generateStructExtensions(t *types.Type) error {
|
||||
}
|
||||
|
||||
// TODO(seans3): Validate struct extensions here.
|
||||
g.emitExtensions(extensions, unions)
|
||||
g.emitExtensions(extensions, unions, otherExtensions)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type) error {
|
||||
func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *types.Type, otherExtensions map[string]interface{}) error {
|
||||
extensions, parseErrors := parseExtensions(m.CommentLines)
|
||||
validationErrors := validateMemberExtensions(extensions, m)
|
||||
errors := append(parseErrors, validationErrors...)
|
||||
@@ -505,13 +613,13 @@ func (g openAPITypeWriter) generateMemberExtensions(m *types.Member, parent *typ
|
||||
klog.V(2).Infof("%s %s\n", errorPrefix, e)
|
||||
}
|
||||
}
|
||||
g.emitExtensions(extensions, nil)
|
||||
g.emitExtensions(extensions, nil, otherExtensions)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union) {
|
||||
func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union, otherExtensions map[string]interface{}) {
|
||||
// If any extensions exist, then emit code to create them.
|
||||
if len(extensions) == 0 && len(unions) == 0 {
|
||||
if len(extensions) == 0 && len(unions) == 0 && len(otherExtensions) == 0 {
|
||||
return
|
||||
}
|
||||
g.Do("VendorExtensible: spec.VendorExtensible{\nExtensions: spec.Extensions{\n", nil)
|
||||
@@ -534,6 +642,16 @@ func (g openAPITypeWriter) emitExtensions(extensions []extension, unions []union
|
||||
}
|
||||
g.Do("},\n", nil)
|
||||
}
|
||||
|
||||
if len(otherExtensions) > 0 {
|
||||
for k, v := range otherExtensions {
|
||||
g.Do("$.key$: $.value$,\n", map[string]interface{}{
|
||||
"key": fmt.Sprintf("%#v", k),
|
||||
"value": fmt.Sprintf("%#v", v),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
g.Do("},\n},\n", nil)
|
||||
}
|
||||
|
||||
@@ -585,7 +703,7 @@ func defaultFromComments(comments []string, commentPath string, t *types.Type) (
|
||||
}
|
||||
|
||||
var i interface{}
|
||||
if id, ok := defaultergen.ParseSymbolReference(tag, commentPath); ok {
|
||||
if id, ok := parseSymbolReference(tag, commentPath); ok {
|
||||
klog.Errorf("%v, %v", id, commentPath)
|
||||
return nil, &id, nil
|
||||
} else if err := json.Unmarshal([]byte(tag), &i); err != nil {
|
||||
@@ -594,6 +712,31 @@ func defaultFromComments(comments []string, commentPath string, t *types.Type) (
|
||||
return i, nil, nil
|
||||
}
|
||||
|
||||
var refRE = regexp.MustCompile(`^ref\((?P<reference>[^"]+)\)$`)
|
||||
var refREIdentIndex = refRE.SubexpIndex("reference")
|
||||
|
||||
// parseSymbolReference looks for strings that match one of the following:
|
||||
// - ref(Ident)
|
||||
// - ref(pkgpath.Ident)
|
||||
// If the input string matches either of these, it will return the (optional)
|
||||
// pkgpath, the Ident, and true. Otherwise it will return empty strings and
|
||||
// false.
|
||||
//
|
||||
// This is borrowed from k8s.io/code-generator.
|
||||
func parseSymbolReference(s, sourcePackage string) (types.Name, bool) {
|
||||
matches := refRE.FindStringSubmatch(s)
|
||||
if len(matches) < refREIdentIndex || matches[refREIdentIndex] == "" {
|
||||
return types.Name{}, false
|
||||
}
|
||||
|
||||
contents := matches[refREIdentIndex]
|
||||
name := types.ParseFullyQualifiedName(contents)
|
||||
if len(name.Package) == 0 {
|
||||
name.Package = sourcePackage
|
||||
}
|
||||
return name, true
|
||||
}
|
||||
|
||||
func implementsCustomUnmarshalling(t *types.Type) bool {
|
||||
switch t.Kind {
|
||||
case types.Pointer:
|
||||
@@ -718,11 +861,15 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type)
|
||||
if name == "" {
|
||||
return nil
|
||||
}
|
||||
validationSchema, err := ParseCommentTags(m.Type, m.CommentLines, markerPrefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := g.validatePatchTags(m, parent); err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("\"$.$\": {\n", name)
|
||||
if err := g.generateMemberExtensions(m, parent); err != nil {
|
||||
if err := g.generateMemberExtensions(m, parent, validationSchema.Extensions); err != nil {
|
||||
return err
|
||||
}
|
||||
g.Do("SchemaProps: spec.SchemaProps{\n", nil)
|
||||
@@ -741,6 +888,10 @@ func (g openAPITypeWriter) generateProperty(m *types.Member, parent *types.Type)
|
||||
if err := g.generateDefault(m.CommentLines, m.Type, omitEmpty, parent); err != nil {
|
||||
return fmt.Errorf("failed to generate default in %v: %v: %v", parent, m.Name, err)
|
||||
}
|
||||
err = g.generateValueValidations(&validationSchema.SchemaProps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
t := resolveAliasAndPtrType(m.Type)
|
||||
// If we can get a openAPI type and format for this type, we consider it to be simple property
|
||||
typeString, format := openapi.OpenAPITypeFormat(t.String())
|
||||
@@ -814,6 +965,10 @@ func (g openAPITypeWriter) generateMapProperty(t *types.Type) error {
|
||||
typeString, format := openapi.OpenAPITypeFormat(elemType.String())
|
||||
if typeString != "" {
|
||||
g.generateSimpleProperty(typeString, format)
|
||||
if enumType, isEnum := g.enumContext.EnumType(t.Elem); isEnum {
|
||||
// original type is an enum, add "Enum: " and the values
|
||||
g.Do("Enum: []interface{}{$.$},\n", strings.Join(enumType.ValueStrings(), ", "))
|
||||
}
|
||||
g.Do("},\n},\n},\n", nil)
|
||||
return nil
|
||||
}
|
||||
@@ -847,6 +1002,10 @@ func (g openAPITypeWriter) generateSliceProperty(t *types.Type) error {
|
||||
typeString, format := openapi.OpenAPITypeFormat(elemType.String())
|
||||
if typeString != "" {
|
||||
g.generateSimpleProperty(typeString, format)
|
||||
if enumType, isEnum := g.enumContext.EnumType(t.Elem); isEnum {
|
||||
// original type is an enum, add "Enum: " and the values
|
||||
g.Do("Enum: []interface{}{$.$},\n", strings.Join(enumType.ValueStrings(), ", "))
|
||||
}
|
||||
g.Do("},\n},\n},\n", nil)
|
||||
return nil
|
||||
}
|
||||
|
||||
5
vendor/k8s.io/kube-openapi/pkg/generators/rules/idl_tag.go
generated
vendored
@@ -1,7 +1,8 @@
|
||||
package rules
|
||||
|
||||
import (
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
const ListTypeIDLTag = "listType"
|
||||
@@ -24,7 +25,7 @@ func (l *ListTypeMissing) Validate(t *types.Type) ([]string, error) {
|
||||
switch t.Kind {
|
||||
case types.Struct:
|
||||
for _, m := range t.Members {
|
||||
hasListType := types.ExtractCommentTags("+", m.CommentLines)[ListTypeIDLTag] != nil
|
||||
hasListType := gengo.ExtractCommentTags("+", m.CommentLines)[ListTypeIDLTag] != nil
|
||||
|
||||
if m.Name == "Items" && m.Type.Kind == types.Slice && hasNamedMember(t, "ListMeta") {
|
||||
if hasListType {
|
||||
|
||||
4
vendor/k8s.io/kube-openapi/pkg/generators/rules/names_match.go
generated
vendored
@@ -22,7 +22,7 @@ import (
|
||||
|
||||
"k8s.io/kube-openapi/pkg/util/sets"
|
||||
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -135,7 +135,7 @@ func namesMatch(goName, jsonName string) bool {
|
||||
if !isAllowedName(goName) || !isAllowedName(jsonName) {
|
||||
return false
|
||||
}
|
||||
if strings.ToLower(goName) != strings.ToLower(jsonName) {
|
||||
if !strings.EqualFold(goName, jsonName) {
|
||||
return false
|
||||
}
|
||||
// Go field names must be CamelCase. JSON field names must be camelCase.
|
||||
|
||||
2
vendor/k8s.io/kube-openapi/pkg/generators/rules/omitempty_match_case.go
generated
vendored
@@ -20,7 +20,7 @@ import (
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
// OmitEmptyMatchCase implements APIRule interface.
|
||||
|
||||
19
vendor/k8s.io/kube-openapi/pkg/generators/union.go
generated
vendored
@@ -20,7 +20,8 @@ import (
|
||||
"fmt"
|
||||
"sort"
|
||||
|
||||
"k8s.io/gengo/types"
|
||||
"k8s.io/gengo/v2"
|
||||
"k8s.io/gengo/v2/types"
|
||||
)
|
||||
|
||||
const tagUnionMember = "union"
|
||||
@@ -141,7 +142,7 @@ func parseEmbeddedUnion(t *types.Type) ([]union, []error) {
|
||||
// embedded types.
|
||||
func parseUnionStruct(t *types.Type) (*union, []error) {
|
||||
errors := []error{}
|
||||
if types.ExtractCommentTags("+", t.CommentLines)[tagUnionMember] == nil {
|
||||
if gengo.ExtractCommentTags("+", t.CommentLines)[tagUnionMember] == nil {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -156,14 +157,14 @@ func parseUnionStruct(t *types.Type) (*union, []error) {
|
||||
errors = append(errors, fmt.Errorf("union structures can't have embedded fields: %v.%v", t.Name, m.Name))
|
||||
continue
|
||||
}
|
||||
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil {
|
||||
if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil {
|
||||
errors = append(errors, fmt.Errorf("union struct can't have unionDeprecated members: %v.%v", t.Name, m.Name))
|
||||
continue
|
||||
}
|
||||
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil {
|
||||
if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil {
|
||||
errors = append(errors, u.setDiscriminator(jsonName)...)
|
||||
} else {
|
||||
if !hasOptionalTag(&m) {
|
||||
if optional, err := isOptional(&m); !optional || err != nil {
|
||||
errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name))
|
||||
}
|
||||
u.addMember(jsonName, m.Name)
|
||||
@@ -186,15 +187,15 @@ func parseUnionMembers(t *types.Type) (*union, []error) {
|
||||
if shouldInlineMembers(&m) {
|
||||
continue
|
||||
}
|
||||
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil {
|
||||
if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionDiscriminator] != nil {
|
||||
errors = append(errors, u.setDiscriminator(jsonName)...)
|
||||
}
|
||||
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionMember] != nil {
|
||||
if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionMember] != nil {
|
||||
errors = append(errors, fmt.Errorf("union tag is not accepted on struct members: %v.%v", t.Name, m.Name))
|
||||
continue
|
||||
}
|
||||
if types.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil {
|
||||
if !hasOptionalTag(&m) {
|
||||
if gengo.ExtractCommentTags("+", m.CommentLines)[tagUnionDeprecated] != nil {
|
||||
if optional, err := isOptional(&m); !optional || err != nil {
|
||||
errors = append(errors, fmt.Errorf("union members must be optional: %v.%v", t.Name, m.Name))
|
||||
}
|
||||
u.addMember(jsonName, m.Name)
|
||||
|
||||
3
vendor/k8s.io/kube-openapi/pkg/schemaconv/smd.go
generated
vendored
@@ -214,9 +214,6 @@ func makeUnion(extensions map[string]interface{}) (schema.Union, error) {
		}
	}

	if union.Discriminator != nil && len(union.Fields) == 0 {
		return schema.Union{}, fmt.Errorf("discriminator set to %v, but no fields in union", *union.Discriminator)
	}
	return union, nil
}
19
vendor/modules.txt
vendored
@@ -907,12 +907,12 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
|
||||
## explicit; go 1.18
|
||||
golang.org/x/exp/constraints
|
||||
golang.org/x/exp/slices
|
||||
# golang.org/x/mod v0.14.0
|
||||
# golang.org/x/mod v0.15.0
|
||||
## explicit; go 1.18
|
||||
golang.org/x/mod/internal/lazyregexp
|
||||
golang.org/x/mod/module
|
||||
golang.org/x/mod/semver
|
||||
# golang.org/x/net v0.19.0
|
||||
# golang.org/x/net v0.21.0
|
||||
## explicit; go 1.18
|
||||
golang.org/x/net/bpf
|
||||
golang.org/x/net/context
|
||||
@@ -937,7 +937,7 @@ golang.org/x/oauth2/google/internal/externalaccount
|
||||
golang.org/x/oauth2/internal
|
||||
golang.org/x/oauth2/jws
|
||||
golang.org/x/oauth2/jwt
|
||||
# golang.org/x/sync v0.5.0
|
||||
# golang.org/x/sync v0.6.0
|
||||
## explicit; go 1.18
|
||||
golang.org/x/sync/errgroup
|
||||
golang.org/x/sync/singleflight
|
||||
@@ -987,7 +987,7 @@ golang.org/x/text/width
|
||||
# golang.org/x/time v0.3.0
|
||||
## explicit
|
||||
golang.org/x/time/rate
|
||||
# golang.org/x/tools v0.16.1
|
||||
# golang.org/x/tools v0.18.0
|
||||
## explicit; go 1.18
|
||||
golang.org/x/tools/benchmark/parse
|
||||
golang.org/x/tools/cmd/stringer
|
||||
@@ -1227,6 +1227,13 @@ k8s.io/gengo/generator
|
||||
k8s.io/gengo/namer
|
||||
k8s.io/gengo/parser
|
||||
k8s.io/gengo/types
|
||||
# k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70
|
||||
## explicit; go 1.20
|
||||
k8s.io/gengo/v2
|
||||
k8s.io/gengo/v2/generator
|
||||
k8s.io/gengo/v2/namer
|
||||
k8s.io/gengo/v2/parser
|
||||
k8s.io/gengo/v2/types
|
||||
# k8s.io/klog/v2 v2.120.1
|
||||
## explicit; go 1.18
|
||||
k8s.io/klog/v2
|
||||
@@ -1247,8 +1254,8 @@ k8s.io/klog/v2/textlogger
|
||||
## explicit; go 1.22.0
|
||||
# k8s.io/kube-controller-manager v0.0.0 => ./staging/src/k8s.io/kube-controller-manager
|
||||
## explicit; go 1.22.0
|
||||
# k8s.io/kube-openapi v0.0.0-20231113174909-778a5567bc1e
|
||||
## explicit; go 1.19
|
||||
# k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340
|
||||
## explicit; go 1.20
|
||||
k8s.io/kube-openapi/cmd/openapi-gen
|
||||
k8s.io/kube-openapi/cmd/openapi-gen/args
|
||||
k8s.io/kube-openapi/pkg/aggregator