Add test/typecheck, a fast typecheck for all build platforms.
Most of the time spent compiling goes to optimizing and linking binary code, while most errors occur at the syntax or semantic (type) layers. Go's compiler is importable as a normal package, so we can do fast syntax and type checking for the 10 platforms we build on. This currently takes ~6 minutes of CPU time (parallelized). It makes presubmit cross builds superfluous, since it should catch most cross-build breaks (generally Unix and 64-bit assumptions).

Example output:

$ time go run test/typecheck/main.go
type-checking: linux/amd64, windows/386, darwin/amd64, linux/arm, linux/386, windows/amd64, linux/arm64, linux/ppc64le, linux/s390x, darwin/386
ERROR(windows/amd64) pkg/proxy/ipvs/proxier.go:1708:27: ENXIO not declared by package unix
ERROR(windows/386) pkg/proxy/ipvs/proxier.go:1708:27: ENXIO not declared by package unix

real	0m45.083s
user	6m15.504s
sys	1m14.000s
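The core of the approach, condensed here for illustration only (not part of the commit): set GOOS/GOARCH on a go/build.Context, parse each directory, and type-check it with a source-based importer. This sketch reuses the forked types and srcimporter packages added below, and omits the real tool's build-tag filtering, vendor handling, error ignores, and per-platform parallelism; checkDir and the hard-coded target are hypothetical.

// Illustrative sketch only -- the full implementation follows in the diff.
package main

import (
	"fmt"
	"go/ast"
	"go/build"
	"go/parser"
	"go/token"

	"k8s.io/kubernetes/test/typecheck/srcimporter"
	"k8s.io/kubernetes/third_party/forked/golang/go/types"
)

// checkDir type-checks the package in dir for one GOOS/GOARCH pair.
func checkDir(dir, goos, goarch string) {
	ctx := build.Default
	ctx.GOOS, ctx.GOARCH = goos, goarch
	ctx.CgoEnabled = true

	fset := token.NewFileSet()
	pkgs, err := parser.ParseDir(fset, dir, nil, parser.AllErrors)
	if err != nil {
		fmt.Println("ERROR(syntax)", err)
		return
	}

	conf := types.Config{
		FakeImportC: true,
		Sizes:       types.SizesFor("gc", goarch),
		// Keep going after the first type error so all of them are reported.
		Error:    func(err error) { fmt.Printf("ERROR(%s/%s) %s\n", goos, goarch, err) },
		Importer: srcimporter.New(&ctx, fset, make(map[string]*types.Package)),
	}
	for _, p := range pkgs {
		files := make([]*ast.File, 0, len(p.Files))
		for _, f := range p.Files {
			files = append(files, f) // the real tool filters by build tags here
		}
		conf.Check(dir, fset, files, nil)
	}
}

func main() {
	// Hypothetical invocation: check one package for a Windows target.
	checkDir("pkg/proxy/ipvs", "windows", "amd64")
}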
test/typecheck/main.go (new file, 347 lines)
@@ -0,0 +1,347 @@
/*
Copyright 2018 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// do a fast type check of kubernetes code, for all platforms.
package main

import (
	"flag"
	"fmt"
	"go/ast"
	"go/build"
	"go/parser"
	"go/token"
	"log"
	"os"
	"path/filepath"
	"sort"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"golang.org/x/crypto/ssh/terminal"
	// TODO(rmmh): remove this when golang/go#23712 is fixed, and the
	// fix is the current minimum Go version to build Kubernetes.
	"k8s.io/kubernetes/test/typecheck/srcimporter"
	"k8s.io/kubernetes/third_party/forked/golang/go/types"
)

var (
	verbose   = flag.Bool("verbose", false, "print more information")
	cross     = flag.Bool("cross", true, "build for all platforms")
	platforms = flag.String("platform", "", "comma-separated list of platforms to typecheck")
	timings   = flag.Bool("time", false, "output times taken for each phase")
	defuses   = flag.Bool("defuse", false, "output defs/uses")
	serial    = flag.Bool("serial", false, "don't type check platforms in parallel")

	isTerminal = terminal.IsTerminal(int(os.Stdout.Fd()))
	logPrefix  = ""

	// When processed in order, windows and darwin are early to make
	// interesting OS-based errors happen earlier.
	crossPlatforms = []string{
		"linux/amd64", "windows/386",
		"darwin/amd64", "linux/arm",
		"linux/386", "windows/amd64",
		"linux/arm64", "linux/ppc64le",
		"linux/s390x", "darwin/386",
	}
)

type analyzer struct {
	fset      *token.FileSet // positions are relative to fset
	conf      types.Config
	ctx       build.Context
	failed    bool
	platform  string
	donePaths map[string]interface{}
}

func newAnalyzer(platform string) *analyzer {
	ctx := build.Default
	platSplit := strings.Split(platform, "/")
	ctx.GOOS, ctx.GOARCH = platSplit[0], platSplit[1]
	ctx.CgoEnabled = true

	a := &analyzer{
		platform:  platform,
		fset:      token.NewFileSet(),
		ctx:       ctx,
		donePaths: make(map[string]interface{}),
	}
	a.conf = types.Config{
		FakeImportC: true,
		Error:       a.handleError,
		Sizes:       types.SizesFor("gc", a.ctx.GOARCH),
	}

	a.conf.Importer = srcimporter.New(
		&a.ctx, a.fset, make(map[string]*types.Package))

	if *verbose {
		fmt.Printf("context: %#v\n", ctx)
	}

	return a
}

func (a *analyzer) handleError(err error) {
	if e, ok := err.(types.Error); ok {
		// useful for some ignores:
		// path := e.Fset.Position(e.Pos).String()
		ignore := false
		// TODO(rmmh): read ignores from a file, so this code can
		// be Kubernetes-agnostic. Unused ignores should be treated as
		// errors, to ensure coverage isn't overly broad.
		if strings.Contains(e.Msg, "GetOpenAPIDefinitions") {
			// TODO(rmmh): figure out why this happens.
			// cmd/kube-apiserver/app/server.go:392:70
			// test/integration/framework/master_utils.go:131:84
			ignore = true
		}
		if ignore {
			if *verbose {
				fmt.Println("ignoring error:", err)
			}
			return
		}
	}
	// TODO(rmmh): dedup errors across platforms?
	fmt.Fprintf(os.Stderr, "%sERROR(%s) %s\n", logPrefix, a.platform, err)
	a.failed = true
}

// collect parses and type-checks all Go packages in dir, skipping
// directories that have already been processed.
func (a *analyzer) collect(dir string) {
	if _, ok := a.donePaths[dir]; ok {
		return
	}
	a.donePaths[dir] = nil

	// Create the AST by parsing src.
	fs, err := parser.ParseDir(a.fset, dir, nil, parser.AllErrors)

	if err != nil {
		fmt.Println(logPrefix+"ERROR(syntax)", err)
		a.failed = true
		return
	}

	if len(fs) > 1 && *verbose {
		fmt.Println("multiple packages in dir:", dir)
	}

	for _, p := range fs {
		// returns first error, but a.handleError deals with it
		files := a.filterFiles(p.Files)
		if *verbose {
			fmt.Printf("path: %s package: %s files: ", dir, p.Name)
			for _, f := range files {
				fname := filepath.Base(a.fset.File(f.Pos()).Name())
				fmt.Printf("%s ", fname)
			}
			fmt.Printf("\n")
		}
		a.typeCheck(dir, files)
	}
}

// filterFiles restricts a list of files to only those that should be built by
// the current platform. This includes both build suffixes (_windows.go) and build
// tags ("// +build !linux" at the beginning).
func (a *analyzer) filterFiles(fs map[string]*ast.File) []*ast.File {
	files := []*ast.File{}
	for _, f := range fs {
		fpath := a.fset.File(f.Pos()).Name()
		dir, name := filepath.Split(fpath)
		matches, err := a.ctx.MatchFile(dir, name)
		if err != nil {
			fmt.Fprintf(os.Stderr, "%sERROR reading %s: %s\n", logPrefix, fpath, err)
			a.failed = true
			continue
		}
		if matches {
			files = append(files, f)
		}
	}
	return files
}

func (a *analyzer) typeCheck(dir string, files []*ast.File) error {
	info := types.Info{
		Defs: make(map[*ast.Ident]types.Object),
		Uses: make(map[*ast.Ident]types.Object),
	}

	// NOTE: this type check does a *recursive* import, but srcimporter
	// doesn't do a full type check (ignores function bodies)-- this has
	// some additional overhead.
	//
	// This means that we need to ensure that typeCheck runs on all
	// code we will be compiling.
	//
	// TODO(rmmh): Customize our forked srcimporter to do this better.
	pkg, err := a.conf.Check(dir, a.fset, files, &info)
	if err != nil {
		return err // type error
	}

	// A significant fraction of vendored code only compiles on Linux,
	// but it's only imported by code that has build-guards for Linux.
	// Track vendored code to type-check it in a second pass.
	for _, imp := range pkg.Imports() {
		if strings.HasPrefix(imp.Path(), "k8s.io/kubernetes/vendor/") {
			vendorPath := imp.Path()[len("k8s.io/kubernetes/"):]
			if *verbose {
				fmt.Println("recursively checking vendor path:", vendorPath)
			}
			a.collect(vendorPath)
		}
	}

	if *defuses {
		for id, obj := range info.Defs {
			fmt.Printf("%s: %q defines %v\n",
				a.fset.Position(id.Pos()), id.Name, obj)
		}
		for id, obj := range info.Uses {
			fmt.Printf("%s: %q uses %v\n",
				a.fset.Position(id.Pos()), id.Name, obj)
		}
	}

	return nil
}

type collector struct {
	dirs []string
}

// handlePath walks the filesystem recursively, collecting directories,
// ignoring some unneeded directories (hidden/vendored) that are handled
// specially later.
func (c *collector) handlePath(path string, info os.FileInfo, err error) error {
	if err != nil {
		return err
	}
	if info.IsDir() {
		// Ignore hidden directories (.git, .cache, etc)
		if len(path) > 1 && path[0] == '.' ||
			// Staging code is symlinked from vendor/k8s.io, and uses import
			// paths as if it were inside of vendor/. It fails typechecking
			// inside of staging/, but works when typechecked as part of vendor/.
			path == "staging" ||
			// OS-specific vendor code tends to be imported by OS-specific
			// packages. We recursively typecheck imported vendored packages for
			// each OS, but don't typecheck everything for every OS.
			path == "vendor" ||
			path == "_output" ||
			// This is a weird one. /testdata/ is *mostly* ignored by Go,
			// and this translates to kubernetes/vendor not working.
			// edit/record.go doesn't compile without gopkg.in/yaml.v2
			// in $GOSRC/$GOROOT (both typecheck and the shell script).
			path == "pkg/kubectl/cmd/testdata/edit" {
			return filepath.SkipDir
		}
		c.dirs = append(c.dirs, path)
	}
	return nil
}

func main() {
	flag.Parse()
	args := flag.Args()

	if *verbose {
		*serial = true // to avoid confusing interleaved logs
	}

	if len(args) == 0 {
		args = append(args, ".")
	}

	c := collector{}
	for _, arg := range args {
		err := filepath.Walk(arg, c.handlePath)
		if err != nil {
			log.Fatalf("Error walking: %v", err)
		}
	}
	sort.Strings(c.dirs)

	ps := crossPlatforms[:]
	if *platforms != "" {
		ps = strings.Split(*platforms, ",")
	} else if !*cross {
		ps = ps[:1]
	}

	fmt.Println("type-checking: ", strings.Join(ps, ", "))

	var wg sync.WaitGroup
	var processedDirs int64
	var currentWork int64 // (dir_index << 8) | platform_index
	statuses := make([]int, len(ps))
	for i, p := range ps {
		wg.Add(1)
		fn := func(i int, p string) {
			start := time.Now()
			a := newAnalyzer(p)
			for n, dir := range c.dirs {
				a.collect(dir)
				atomic.AddInt64(&processedDirs, 1)
				atomic.StoreInt64(&currentWork, int64(n<<8|i))
			}
			if a.failed {
				statuses[i] = 1
			}
			if *timings {
				fmt.Printf("%s took %.1fs\n", p, time.Since(start).Seconds())
			}
			wg.Done()
		}
		if *serial {
			fn(i, p)
		} else {
			go fn(i, p)
		}
	}
	if isTerminal {
		logPrefix = "\r" // clear status bar when printing
		// Display a status bar so devs can estimate completion times.
		go func() {
			total := len(ps) * len(c.dirs)
			for proc := 0; proc < total; proc = int(atomic.LoadInt64(&processedDirs)) {
				work := atomic.LoadInt64(&currentWork)
				dir := c.dirs[work>>8]
				platform := ps[work&0xFF]
				if len(dir) > 80 {
					dir = dir[:80]
				}
				fmt.Printf("\r%d/%d \033[2m%-13s\033[0m %-80s", proc, total, platform, dir)
				time.Sleep(50 * time.Millisecond)
			}
		}()
	}
	wg.Wait()
	fmt.Println()
	for _, status := range statuses {
		if status != 0 {
			os.Exit(status)
		}
	}
}
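For day-to-day use, the flags declared above allow narrowing the run; for example, checking a couple of platforms with timing output (the package path is illustrative):

$ go run test/typecheck/main.go -platform=windows/amd64,linux/arm64 -time pkg/proxy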