Vendor gazelle

This commit is contained in:
Tim Hockin
2017-12-22 16:49:04 -08:00
parent 4685df26dd
commit 3e583de0ac
106 changed files with 18281 additions and 1975 deletions

15
vendor/github.com/bazelbuild/buildtools/CONTRIBUTORS generated vendored Normal file
View File

@@ -0,0 +1,15 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
Paul Bethe <pbethe@google.com>
Russ Cox <rsc@google.com>
Laurent Le Brun <laurentlb@google.com>
Justine Alexandra Roberts Tunney <jart@google.com>
Nilton Volpato <nilton@google.com>

13
vendor/github.com/bazelbuild/buildtools/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,13 @@
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

32
vendor/github.com/bazelbuild/buildtools/build/BUILD generated vendored Normal file
View File

@@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Go library for the vendored buildtools BUILD-file parser/printer package.
go_library(
    name = "go_default_library",
    srcs = [
        "lex.go",
        "parse.y.go",  # generated from parse.y by goyacc
        "print.go",
        "quote.go",
        "rewrite.go",
        "rule.go",
        "syntax.go",
        "walk.go",
    ],
    importpath = "github.com/bazelbuild/buildtools/build",
    visibility = ["//visibility:public"],
    deps = ["//vendor/github.com/bazelbuild/buildtools/tables:go_default_library"],
)

# NOTE(review): tags = ["automanaged"] — presumably these filegroups are
# maintained by the vendoring tooling (this commit is "Vendor gazelle");
# do not edit by hand without confirming.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,52 @@
"""Provides go_yacc and genfile_check_test
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Label of the goyacc binary used to regenerate parsers from .y grammars.
_GO_YACC_TOOL = "@org_golang_x_tools//cmd/goyacc"

def go_yacc(src, out, visibility=None):
    """Runs go tool yacc -o $out $src."""
    # GOROOT is derived from the tool's own location so that goyacc can
    # find its supporting files when run from the build tree.
    native.genrule(
        name = src + ".go_yacc",
        srcs = [src],
        outs = [out],
        tools = [_GO_YACC_TOOL],
        cmd = ("export GOROOT=$$(dirname $(location " + _GO_YACC_TOOL + "))/..;" +
               " $(location " + _GO_YACC_TOOL + ") " +
               " -o $(location " + out + ") $(SRCS)"),
        visibility = visibility,
        # local = 1: run without sandboxing (see Bazel genrule docs) —
        # presumably because of the GOROOT export above; confirm before changing.
        local = 1,
    )
def genfile_check_test(src, gen):
    """Asserts that any checked-in generated code matches regen."""
    if not src:
        fail("src is required", "src")
    if not gen:
        fail("gen is required", "gen")
    # Emit a one-line shell script that diffs its two arguments.
    native.genrule(
        name = src + "_checksh",
        outs = [src + "_check.sh"],
        cmd = "echo 'diff $$@' > $@",
    )
    # The test passes only when diff exits 0, i.e. when the checked-in
    # file (src) and the freshly generated file (gen) are identical.
    native.sh_test(
        name = src + "_checkshtest",
        size = "small",
        srcs = [src + "_check.sh"],
        data = [src, gen],
        args = ["$(location " + src + ")", "$(location " + gen + ")"],
    )

772
vendor/github.com/bazelbuild/buildtools/build/lex.go generated vendored Normal file
View File

@@ -0,0 +1,772 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Lexical scanning for BUILD file parser.
package build
import (
"bytes"
"fmt"
"strings"
"unicode/utf8"
)
// Parse parses the input data and returns the corresponding parse tree.
//
// The filename is used only for generating error messages.
func Parse(filename string, data []byte) (*File, error) {
	return newInput(filename, data).parse()
}
// An input represents a single input file being parsed.
type input struct {
	// Lexing state.
	filename  string    // name of input file, for errors
	complete  []byte    // entire input
	remaining []byte    // remaining input
	token     []byte    // token being scanned
	lastToken string    // most recently returned token, for error messages
	pos       Position  // current input position
	comments  []Comment // accumulated comments
	endRule   int       // position of end of current rule (len(remaining) at that point); 0 = unknown, -1 = disabled
	depth     int       // nesting of [ ] { } ( )

	// Parser state.
	file       *File // returned top-level syntax tree
	parseError error // error encountered during parsing

	// Comment assignment state.
	pre  []Expr // all expressions, in preorder traversal
	post []Expr // all expressions, in postorder traversal
}
// newInput returns an input primed to lex data, positioned at line 1.
func newInput(filename string, data []byte) *input {
	in := &input{filename: filename}
	in.complete = data
	in.remaining = data
	in.pos = Position{Line: 1, LineRune: 1, Byte: 0}
	return in
}
// parse parses the input file.
func (in *input) parse() (f *File, err error) {
	// The parser panics for both routine errors like syntax errors
	// and for programmer bugs like array index errors.
	// Turn both into error returns. Catching bug panics is
	// especially important when processing many files.
	defer func() {
		if e := recover(); e != nil {
			if e == in.parseError {
				// Panic raised by in.Error: the detailed message is
				// already recorded in in.parseError.
				err = in.parseError
			} else {
				// Unexpected panic (programmer bug): wrap it with the
				// current file position for easier debugging.
				err = fmt.Errorf("%s:%d:%d: internal error: %v", in.filename, in.pos.Line, in.pos.LineRune, e)
			}
		}
	}()

	// Invoke the parser generated from parse.y.
	yyParse(in)
	if in.parseError != nil {
		return nil, in.parseError
	}
	in.file.Path = in.filename

	// Assign comments to nearby syntax.
	in.assignComments()

	return in.file, nil
}
// Error is called to report an error.
// When called by the generated code s is always "syntax error".
// Error does not return: it panics.
func (in *input) Error(s string) {
	msg := s
	if msg == "syntax error" && in.lastToken != "" {
		msg += " near " + in.lastToken
	}
	in.parseError = fmt.Errorf("%s:%d:%d: %v", in.filename, in.pos.Line, in.pos.LineRune, msg)
	panic(in.parseError)
}
// eof reports whether the input has reached end of file.
// (remaining shrinks toward zero as readRune consumes the input.)
func (in *input) eof() bool {
	return len(in.remaining) == 0
}
// peekRune returns the next rune in the input without consuming it.
// At end of file it returns 0.
func (in *input) peekRune() int {
	if len(in.remaining) > 0 {
		r, _ := utf8.DecodeRune(in.remaining)
		return int(r)
	}
	return 0
}
// readRune consumes and returns the next rune in the input,
// updating the line/column/byte position as it goes.
// Calling readRune at EOF is an internal error and panics via in.Error.
func (in *input) readRune() int {
	if len(in.remaining) == 0 {
		in.Error("internal lexer error: readRune at EOF")
	}
	r, size := utf8.DecodeRune(in.remaining)
	in.remaining = in.remaining[size:]
	in.pos.Byte += size
	switch r {
	case '\n':
		in.pos.Line++
		in.pos.LineRune = 1
	default:
		in.pos.LineRune++
	}
	return int(r)
}
// startToken marks the beginning of the next input token.
// It must be followed by a call to endToken, once the token has
// been consumed using readRune.
func (in *input) startToken(val *yySymType) {
	val.pos = in.pos
	val.tok = ""
	in.token = in.remaining
}
// yySymType (used in the next few functions) is defined by the
// generated parser. It is a struct containing all the fields listed
// in parse.y's %union [sic] section.

// endToken marks the end of an input token.
// It records the actual token string in val.tok if the caller
// has not done that already.
func (in *input) endToken(val *yySymType) {
	if val.tok != "" {
		return
	}
	tok := string(in.token[:len(in.token)-len(in.remaining)])
	val.tok = tok
	in.lastToken = tok
}
// Lex is called from the generated parser to obtain the next input token.
// It returns the token value (either a rune like '+' or a symbolic token _FOR)
// and sets val to the data associated with the token.
//
// For all our input tokens, the associated data is
// val.Pos (the position where the token begins)
// and val.Token (the input string corresponding to the token).
func (in *input) Lex(val *yySymType) int {
	// Skip past spaces, stopping at non-space or EOF.
	countNL := 0 // number of newlines we've skipped past
	for !in.eof() {
		// The parser does not track indentation, because for the most part
		// BUILD expressions don't care about how they are indented.
		// However, we do need to be able to distinguish
		//
		//	x = y[0]
		//
		// from the occasional
		//
		//	x = y
		//	[0]
		//
		// To handle this one case, when we reach the beginning of a
		// top-level BUILD expression, we scan forward to see where
		// it should end and record the number of input bytes remaining
		// at that endpoint. When we reach that point in the input, we
		// insert an implicit semicolon to force the two expressions
		// to stay separate.
		//
		if in.endRule != 0 && len(in.remaining) == in.endRule {
			in.endRule = 0
			in.lastToken = "implicit ;"
			val.tok = ";"
			return ';'
		}

		// Skip over spaces. Count newlines so we can give the parser
		// information about where top-level blank lines are,
		// for top-level comment assignment.
		c := in.peekRune()
		if c == ' ' || c == '\t' || c == '\r' || c == '\n' {
			if c == '\n' && in.endRule == 0 {
				// Not in a rule. Tell parser about top-level blank line.
				in.startToken(val)
				in.readRune()
				in.endToken(val)
				return '\n'
			}
			if c == '\n' {
				countNL++
			}
			in.readRune()
			continue
		}

		// Comment runs to end of line.
		if c == '#' {
			// Is this comment the only thing on its line?
			// Find the last \n before this # and see if it's all
			// spaces from there to here.
			// If it's a suffix comment but the last non-space symbol before
			// it is one of (, [, or {, treat it as a line comment that should be
			// put inside the corresponding block.
			i := bytes.LastIndex(in.complete[:in.pos.Byte], []byte("\n"))
			prefix := bytes.TrimSpace(in.complete[i+1 : in.pos.Byte])
			isSuffix := true
			if len(prefix) == 0 ||
				prefix[len(prefix)-1] == '[' ||
				prefix[len(prefix)-1] == '(' ||
				prefix[len(prefix)-1] == '{' {
				isSuffix = false
			}

			// Consume comment.
			in.startToken(val)
			for len(in.remaining) > 0 && in.readRune() != '\n' {
			}
			in.endToken(val)
			val.tok = strings.TrimRight(val.tok, "\n")
			in.lastToken = "comment"

			// If we are at top level (not in a rule), hand the comment to
			// the parser as a _COMMENT token. The grammar is written
			// to handle top-level comments itself.
			if in.endRule == 0 {
				// Not in a rule. Tell parser about top-level comment.
				return _COMMENT
			}

			// Otherwise, save comment for later attachment to syntax tree.
			// countNL > 1 means a blank line preceded this comment;
			// record an empty comment to preserve that separation.
			if countNL > 1 {
				in.comments = append(in.comments, Comment{val.pos, "", false})
			}
			in.comments = append(in.comments, Comment{val.pos, val.tok, isSuffix})
			countNL = 1
			continue
		}

		if c == '\\' && len(in.remaining) >= 2 && in.remaining[1] == '\n' {
			// We can ignore a trailing \ at end of line.
			in.readRune()
			continue
		}

		// Found non-space non-comment.
		break
	}

	// Found the beginning of the next token.
	in.startToken(val)
	defer in.endToken(val)

	// End of file.
	if in.eof() {
		in.lastToken = "EOF"
		return _EOF
	}

	// If endRule is 0, we need to recompute where the end
	// of the next rule (Python expression) is, so that we can
	// generate a virtual end-of-rule semicolon (see above).
	if in.endRule == 0 {
		in.endRule = len(in.skipPython(in.remaining))
		if in.endRule == 0 {
			// skipPython got confused.
			// No more virtual semicolons.
			in.endRule = -1
		}
	}

	// Punctuation tokens.
	switch c := in.peekRune(); c {
	case '[', '(', '{':
		in.depth++
		in.readRune()
		return c

	case ']', ')', '}':
		in.depth--
		in.readRune()
		return c

	case '.', '-', '%', ':', ';', ',', '/', '*': // single-char tokens
		in.readRune()
		return c

	case '<', '>', '=', '!', '+': // possibly followed by =
		in.readRune()
		if in.peekRune() == '=' {
			in.readRune()
			switch c {
			case '<':
				return _LE
			case '>':
				return _GE
			case '=':
				return _EQ
			case '!':
				return _NE
			case '+':
				return _ADDEQ
			}
		}
		return c

	case 'r': // possible beginning of raw quoted string
		if len(in.remaining) < 2 || in.remaining[1] != '"' && in.remaining[1] != '\'' {
			// Not a raw string after all: fall out of the switch and
			// lex the 'r' as the start of an ordinary identifier.
			break
		}
		in.readRune()
		c = in.peekRune()
		fallthrough

	case '"', '\'': // quoted string
		quote := c
		if len(in.remaining) >= 3 && in.remaining[0] == byte(quote) && in.remaining[1] == byte(quote) && in.remaining[2] == byte(quote) {
			// Triple-quoted string.
			in.readRune()
			in.readRune()
			in.readRune()
			var c1, c2, c3 int
			for {
				if in.eof() {
					in.pos = val.pos
					in.Error("unexpected EOF in string")
				}
				// Slide a three-rune window; the string ends when the
				// window holds three closing quotes.
				c1, c2, c3 = c2, c3, in.readRune()
				if c1 == quote && c2 == quote && c3 == quote {
					break
				}
				if c3 == '\\' {
					if in.eof() {
						in.pos = val.pos
						in.Error("unexpected EOF in string")
					}
					in.readRune()
				}
			}
		} else {
			in.readRune()
			for {
				if in.eof() {
					in.pos = val.pos
					in.Error("unexpected EOF in string")
				}
				if in.peekRune() == '\n' {
					in.Error("unexpected newline in string")
				}
				c := in.readRune()
				if c == quote {
					break
				}
				if c == '\\' {
					if in.eof() {
						in.pos = val.pos
						in.Error("unexpected EOF in string")
					}
					in.readRune()
				}
			}
		}
		in.endToken(val)
		s, triple, err := unquote(val.tok)
		if err != nil {
			in.Error(fmt.Sprint(err))
		}
		val.str = s
		val.triple = triple
		return _STRING
	}

	// Checked all punctuation. Must be identifier token.
	if c := in.peekRune(); !isIdent(c) {
		in.Error(fmt.Sprintf("unexpected input character %#q", c))
	}

	// Look for raw Python block (class, def, if, etc at beginning of line) and pass through.
	if in.depth == 0 && in.pos.LineRune == 1 && hasPythonPrefix(in.remaining) {
		// Find end of Python block and advance input beyond it.
		// Have to loop calling readRune in order to maintain line number info.
		rest := in.skipPython(in.remaining)
		for len(in.remaining) > len(rest) {
			in.readRune()
		}
		return _PYTHON
	}

	// Scan over alphanumeric identifier.
	for {
		c := in.peekRune()
		if !isIdent(c) {
			break
		}
		in.readRune()
	}

	// Call endToken to set val.tok to identifier we just scanned,
	// so we can look to see if val.tok is a keyword.
	in.endToken(val)
	if k := keywordToken[val.tok]; k != 0 {
		return k
	}
	return _IDENT
}
// isIdent reports whether c is an identifier rune.
// We treat all non-ASCII runes as identifier runes.
func isIdent(c int) bool {
	switch {
	case c >= 0x80:
		return true
	case '0' <= c && c <= '9',
		'A' <= c && c <= 'Z',
		'a' <= c && c <= 'z',
		c == '_':
		return true
	}
	return false
}
// keywordToken records the special tokens for
// strings that should not be treated as ordinary identifiers.
// Lex consults this table after scanning an identifier.
var keywordToken = map[string]int{
	"and":    _AND,
	"for":    _FOR,
	"if":     _IF,
	"else":   _ELSE,
	"in":     _IN,
	"is":     _IS,
	"lambda": _LAMBDA,
	"not":    _NOT,
	"or":     _OR,
}
// Python scanning.
// About 1% of BUILD files embed arbitrary Python into the file.
// We do not attempt to parse it. Instead, we lex just enough to scan
// beyond it, treating the Python block as an uninterpreted blob.
// hasPythonPrefix reports whether p begins with a keyword that would
// introduce an uninterpreted Python block.
func hasPythonPrefix(p []byte) bool {
	n := len(prefixes)
	for i := 0; i < n; i++ {
		if hasPrefixSpace(p, prefixes[i]) {
			return true
		}
	}
	return false
}
// These keywords introduce uninterpreted Python blocks.
// A keyword matches only when followed by a space, tab, or colon
// (see hasPrefixSpace).
var prefixes = []string{
	"assert",
	"class",
	"def",
	"del",
	"for",
	"if",
	"try",
}
// hasPrefixSpace reports whether p begins with pre followed by a space or colon.
// (A tab also counts as a separator.)
func hasPrefixSpace(p []byte, pre string) bool {
	if len(p) <= len(pre) {
		return false
	}
	switch p[len(pre)] {
	case ' ', '\t', ':':
		// separator present; now check the keyword itself
	default:
		return false
	}
	return string(p[:len(pre)]) == pre
}
// isBlankOrComment reports whether b starts with a line that is blank
// or holds only a comment: the first byte that is not a space, tab, or
// carriage return must be '#' or '\n' (or the slice must run out).
func isBlankOrComment(b []byte) bool {
	for _, c := range b {
		switch c {
		case '#', '\n':
			return true
		case ' ', '\t', '\r':
			// still in leading whitespace; keep scanning
		default:
			return false
		}
	}
	return true
}
// hasPythonContinuation reports whether p begins with a keyword that
// continues an uninterpreted Python block.
func hasPythonContinuation(p []byte) bool {
	n := len(continuations)
	for i := 0; i < n; i++ {
		if hasPrefixSpace(p, continuations[i]) {
			return true
		}
	}
	return false
}
// These keywords continue uninterpreted Python blocks: an unindented
// line starting with one of them does not end the block (see skipPython).
var continuations = []string{
	"except",
	"else",
}
// skipPython returns the data remaining after the uninterpreted
// Python block beginning at p. It does not advance the input position.
// (The only reason for the input receiver is to be able to call in.Error.)
// A nil result means skipPython could not find a stopping point.
func (in *input) skipPython(p []byte) []byte {
	quote := byte(0)     // if non-zero, the kind of quote we're in
	tripleQuote := false // if true, the quote is a triple quote
	depth := 0           // nesting depth for ( ) [ ] { }
	var rest []byte      // data after the Python block

	// Scan over input one byte at a time until we find
	// an unindented, non-blank, non-comment line
	// outside quoted strings and brackets.
	for i := 0; i < len(p); i++ {
		c := p[i]
		if quote != 0 && c == quote && !tripleQuote {
			// Closing single quote ends the string.
			quote = 0
			continue
		}
		if quote != 0 && c == quote && tripleQuote && i+2 < len(p) && p[i+1] == quote && p[i+2] == quote {
			// Closing triple quote ends the string.
			i += 2
			quote = 0
			tripleQuote = false
			continue
		}
		if quote != 0 {
			if c == '\\' {
				i++ // skip escaped char
			}
			continue
		}
		if c == '\'' || c == '"' {
			// Opening quote: decide between triple and single quoting.
			if i+2 < len(p) && p[i+1] == c && p[i+2] == c {
				quote = c
				tripleQuote = true
				i += 2
				continue
			}
			quote = c
			continue
		}
		// Start of a line (previous byte is an unescaped newline),
		// outside any bracket nesting: candidate stopping point.
		if depth == 0 && i > 0 && p[i-1] == '\n' && (i < 2 || p[i-2] != '\\') {
			// Possible stopping point. Save the earliest one we find.
			if rest == nil {
				rest = p[i:]
			}
			if !isBlankOrComment(p[i:]) {
				if !hasPythonContinuation(p[i:]) && c != ' ' && c != '\t' {
					// Yes, stop here.
					break
				}
				// Not a stopping point after all.
				rest = nil
			}
		}
		switch c {
		case '#':
			// Skip comment.
			for i < len(p) && p[i] != '\n' {
				i++
			}
		case '(', '[', '{':
			depth++
		case ')', ']', '}':
			depth--
		}
	}
	if quote != 0 {
		in.Error("EOF scanning Python quoted string")
	}
	return rest
}
// Comment assignment.
// We build two lists of all subexpressions, preorder and postorder.
// The preorder list is ordered by start location, with outer expressions first.
// The postorder list is ordered by end location, with outer expressions last.
// We use the preorder list to assign each whole-line comment to the syntax
// immediately following it, and we use the postorder list to assign each
// end-of-line comment to the syntax immediately preceding it.
// order walks the expression adding it and its subexpressions to the
// preorder and postorder lists: in.pre receives v before its children,
// in.post receives it after. The default case panics so that new Expr
// types cannot be silently skipped during comment assignment.
func (in *input) order(v Expr) {
	if v != nil {
		in.pre = append(in.pre, v)
	}
	switch v := v.(type) {
	default:
		panic(fmt.Errorf("order: unexpected type %T", v))
	case nil:
		// nothing
	case *End:
		// nothing
	case *File:
		for _, stmt := range v.Stmt {
			in.order(stmt)
		}
	case *CommentBlock:
		// nothing
	case *CallExpr:
		in.order(v.X)
		for _, x := range v.List {
			in.order(x)
		}
		in.order(&v.End)
	case *PythonBlock:
		// nothing
	case *LiteralExpr:
		// nothing
	case *StringExpr:
		// nothing
	case *DotExpr:
		in.order(v.X)
	case *ListExpr:
		for _, x := range v.List {
			in.order(x)
		}
		in.order(&v.End)
	case *ListForExpr:
		in.order(v.X)
		for _, c := range v.For {
			in.order(c)
		}
		in.order(&v.End)
	case *SetExpr:
		for _, x := range v.List {
			in.order(x)
		}
		in.order(&v.End)
	case *ForClauseWithIfClausesOpt:
		in.order(v.For)
		for _, c := range v.Ifs {
			in.order(c)
		}
	case *ForClause:
		for _, name := range v.Var {
			in.order(name)
		}
		in.order(v.Expr)
	case *IfClause:
		in.order(v.Cond)
	case *KeyValueExpr:
		in.order(v.Key)
		in.order(v.Value)
	case *DictExpr:
		for _, x := range v.List {
			in.order(x)
		}
		in.order(&v.End)
	case *TupleExpr:
		for _, x := range v.List {
			in.order(x)
		}
		in.order(&v.End)
	case *UnaryExpr:
		in.order(v.X)
	case *BinaryExpr:
		in.order(v.X)
		in.order(v.Y)
	case *ConditionalExpr:
		in.order(v.Then)
		in.order(v.Test)
		in.order(v.Else)
	case *ParenExpr:
		in.order(v.X)
		in.order(&v.End)
	case *SliceExpr:
		in.order(v.X)
		in.order(v.Y)
		in.order(v.Z)
	case *IndexExpr:
		in.order(v.X)
		in.order(v.Y)
	case *LambdaExpr:
		for _, name := range v.Var {
			in.order(name)
		}
		in.order(v.Expr)
	}
	if v != nil {
		in.post = append(in.post, v)
	}
}
// assignComments attaches comments to nearby syntax.
// Whole-line comments become Before comments on the expression that
// follows them; suffix (same-line) comments become Suffix comments on
// the expression that precedes them.
func (in *input) assignComments() {
	// Generate preorder and postorder lists.
	in.order(in.file)

	// Split into whole-line comments and suffix comments.
	var line, suffix []Comment
	for _, com := range in.comments {
		if com.Suffix {
			suffix = append(suffix, com)
		} else {
			line = append(line, com)
		}
	}

	// Assign line comments to syntax immediately following.
	// (in.pre is ordered by start position, outer expressions first,
	// so each comment attaches to the outermost expression after it.)
	for _, x := range in.pre {
		start, _ := x.Span()
		xcom := x.Comment()
		for len(line) > 0 && start.Byte >= line[0].Start.Byte {
			xcom.Before = append(xcom.Before, line[0])
			line = line[1:]
		}
	}

	// Remaining line comments go at end of file.
	in.file.After = append(in.file.After, line...)

	// Assign suffix comments to syntax immediately before.
	for i := len(in.post) - 1; i >= 0; i-- {
		x := in.post[i]
		// Do not assign suffix comments to file
		switch x.(type) {
		case *File:
			continue
		}
		_, end := x.Span()
		xcom := x.Comment()
		for len(suffix) > 0 && end.Byte <= suffix[len(suffix)-1].Start.Byte {
			xcom.Suffix = append(xcom.Suffix, suffix[len(suffix)-1])
			suffix = suffix[:len(suffix)-1]
		}
	}

	// We assigned suffix comments in reverse.
	// If multiple suffix comments were appended to the same
	// expression node, they are now in reverse. Fix that.
	for _, x := range in.post {
		reverseComments(x.Comment().Suffix)
	}

	// Remaining suffix comments go at beginning of file.
	in.file.Before = append(in.file.Before, suffix...)
}
// reverseComments reverses the []Comment list in place.
func reverseComments(list []Comment) {
	for lo, hi := 0, len(list)-1; lo < hi; lo, hi = lo+1, hi-1 {
		list[lo], list[hi] = list[hi], list[lo]
	}
}

684
vendor/github.com/bazelbuild/buildtools/build/parse.y generated vendored Normal file
View File

@@ -0,0 +1,684 @@
// BUILD file parser.
// This is a yacc grammar. Its lexer is in lex.go.
//
// For a good introduction to writing yacc grammars, see
// Kernighan and Pike's book The Unix Programming Environment.
//
// The definitive yacc manual is
// Stephen C. Johnson and Ravi Sethi, "Yacc: A Parser Generator",
// online at http://plan9.bell-labs.com/sys/doc/yacc.pdf.
%{
package build
%}
// The generated parser puts these fields in a struct named yySymType.
// (The name %union is historical, but it is inaccurate for Go.)
%union {
// input tokens
tok string // raw input syntax
str string // decoding of quoted string
pos Position // position of token
triple bool // was string triple quoted?
// partial syntax trees
expr Expr
exprs []Expr
forc *ForClause
ifs []*IfClause
forifs *ForClauseWithIfClausesOpt
forsifs []*ForClauseWithIfClausesOpt
string *StringExpr
strings []*StringExpr
// supporting information
comma Position // position of trailing comma in list, if present
lastRule Expr // most recent rule, to attach line comments to
}
// These declarations set the type for a $ reference ($$, $1, $2, ...)
// based on the kind of symbol it refers to. Other fields can be referred
// to explicitly, as in $<tok>1.
//
// %token is for input tokens generated by the lexer.
// %type is for higher-level grammar rules defined here.
//
// It is possible to put multiple tokens per line, but it is easier to
// keep ordered using a sparser one-per-line list.
%token <pos> '%'
%token <pos> '('
%token <pos> ')'
%token <pos> '*'
%token <pos> '+'
%token <pos> ','
%token <pos> '-'
%token <pos> '.'
%token <pos> '/'
%token <pos> ':'
%token <pos> '<'
%token <pos> '='
%token <pos> '>'
%token <pos> '['
%token <pos> ']'
%token <pos> '{'
%token <pos> '}'
// By convention, yacc token names are all caps.
// However, we do not want to export them from the Go package
// we are creating, so prefix them all with underscores.
%token <pos> _ADDEQ // operator +=
%token <pos> _AND // keyword and
%token <pos> _COMMENT // top-level # comment
%token <pos> _EOF // end of file
%token <pos> _EQ // operator ==
%token <pos> _FOR // keyword for
%token <pos> _GE // operator >=
%token <pos> _IDENT // non-keyword identifier or number
%token <pos> _IF // keyword if
%token <pos> _ELSE // keyword else
%token <pos> _IN // keyword in
%token <pos> _IS // keyword is
%token <pos> _LAMBDA // keyword lambda
%token <pos> _LE // operator <=
%token <pos> _NE // operator !=
%token <pos> _NOT // keyword not
%token <pos> _OR // keyword or
%token <pos> _PYTHON // uninterpreted Python block
%token <pos> _STRING // quoted string
%type <pos> comma_opt
%type <expr> expr
%type <expr> expr_opt
%type <exprs> exprs
%type <exprs> exprs_opt
%type <forc> for_clause
%type <forifs> for_clause_with_if_clauses_opt
%type <forsifs> for_clauses_with_if_clauses_opt
%type <expr> ident
%type <exprs> idents
%type <ifs> if_clauses_opt
%type <exprs> stmts
%type <expr> stmt
%type <expr> keyvalue
%type <exprs> keyvalues
%type <exprs> keyvalues_no_comma
%type <string> string
%type <strings> strings
// Operator precedence.
// Operators listed lower in the table bind tighter.
// We tag rules with this fake, low precedence to indicate
// that when the rule is involved in a shift/reduce
// conflict, we prefer that the parser shift (try for a longer parse).
// Shifting is the default resolution anyway, but stating it explicitly
// silences yacc's warning for that specific case.
%left ShiftInstead
%left '\n'
%left _ASSERT
// '=' and '+=' have the lowest precedence
// e.g. "x = a if c > 0 else 'bar'"
// followed by
// 'if' and 'else' which have lower precedence than all other operators.
// e.g. "a, b if c > 0 else 'foo'" is either a tuple of (a,b) or 'foo'
// and not a tuple of "(a, (b if ... ))"
%left '=' _ADDEQ
%left _IF _ELSE
%left ','
%left ':'
%left _IN _NOT _IS
%left _OR
%left _AND
%left '<' '>' _EQ _NE _LE _GE
%left '+' '-'
%left '*' '/' '%'
%left '.' '[' '('
%right _UNARY
%left _STRING
%%
// Grammar rules.
//
// A note on names: if foo is a rule, then foos is a sequence of foos
// (with interleaved commas or other syntax as appropriate)
// and foo_opt is an optional foo.
file:
stmts _EOF
{
yylex.(*input).file = &File{Stmt: $1}
return 0
}
stmts:
{
$$ = nil
$<lastRule>$ = nil
}
| stmts stmt comma_opt semi_opt
{
// If this statement follows a comment block,
// attach the comments to the statement.
if cb, ok := $<lastRule>1.(*CommentBlock); ok {
$$ = $1
$$[len($1)-1] = $2
$2.Comment().Before = cb.After
$<lastRule>$ = $2
break
}
// Otherwise add to list.
$$ = append($1, $2)
$<lastRule>$ = $2
// Consider this input:
//
// foo()
// # bar
// baz()
//
// If we've just parsed baz(), the # bar is attached to
// foo() as an After comment. Make it a Before comment
// for baz() instead.
if x := $<lastRule>1; x != nil {
com := x.Comment()
$2.Comment().Before = com.After
com.After = nil
}
}
| stmts '\n'
{
// Blank line; sever last rule from future comments.
$$ = $1
$<lastRule>$ = nil
}
| stmts _COMMENT
{
$$ = $1
$<lastRule>$ = $<lastRule>1
if $<lastRule>$ == nil {
cb := &CommentBlock{Start: $2}
$$ = append($$, cb)
$<lastRule>$ = cb
}
com := $<lastRule>$.Comment()
com.After = append(com.After, Comment{Start: $2, Token: $<tok>2})
}
stmt:
expr %prec ShiftInstead
| _PYTHON
{
$$ = &PythonBlock{Start: $1, Token: $<tok>1}
}
semi_opt:
| semi_opt ';'
expr:
ident
| strings %prec ShiftInstead
{
if len($1) == 1 {
$$ = $1[0]
break
}
$$ = $1[0]
for _, x := range $1[1:] {
_, end := $$.Span()
$$ = binary($$, end, "+", x)
}
}
| '[' exprs_opt ']'
{
$$ = &ListExpr{
Start: $1,
List: $2,
Comma: $<comma>2,
End: End{Pos: $3},
ForceMultiLine: forceMultiLine($1, $2, $3),
}
}
| '[' expr for_clauses_with_if_clauses_opt ']'
{
exprStart, _ := $2.Span()
$$ = &ListForExpr{
Brack: "[]",
Start: $1,
X: $2,
For: $3,
End: End{Pos: $4},
ForceMultiLine: $1.Line != exprStart.Line,
}
}
| '(' expr for_clauses_with_if_clauses_opt ')'
{
exprStart, _ := $2.Span()
$$ = &ListForExpr{
Brack: "()",
Start: $1,
X: $2,
For: $3,
End: End{Pos: $4},
ForceMultiLine: $1.Line != exprStart.Line,
}
}
| '{' keyvalue for_clauses_with_if_clauses_opt '}'
{
exprStart, _ := $2.Span()
$$ = &ListForExpr{
Brack: "{}",
Start: $1,
X: $2,
For: $3,
End: End{Pos: $4},
ForceMultiLine: $1.Line != exprStart.Line,
}
}
| '{' keyvalues '}'
{
$$ = &DictExpr{
Start: $1,
List: $2,
Comma: $<comma>2,
End: End{Pos: $3},
ForceMultiLine: forceMultiLine($1, $2, $3),
}
}
| '{' exprs_opt '}'
{
$$ = &SetExpr{
Start: $1,
List: $2,
Comma: $<comma>2,
End: End{Pos: $3},
ForceMultiLine: forceMultiLine($1, $2, $3),
}
}
| '(' exprs_opt ')'
{
if len($2) == 1 && $<comma>2.Line == 0 {
// Just a parenthesized expression, not a tuple.
$$ = &ParenExpr{
Start: $1,
X: $2[0],
End: End{Pos: $3},
ForceMultiLine: forceMultiLine($1, $2, $3),
}
} else {
$$ = &TupleExpr{
Start: $1,
List: $2,
Comma: $<comma>2,
End: End{Pos: $3},
ForceCompact: forceCompact($1, $2, $3),
ForceMultiLine: forceMultiLine($1, $2, $3),
}
}
}
| expr '.' _IDENT
{
$$ = &DotExpr{
X: $1,
Dot: $2,
NamePos: $3,
Name: $<tok>3,
}
}
| expr '(' exprs_opt ')'
{
$$ = &CallExpr{
X: $1,
ListStart: $2,
List: $3,
End: End{Pos: $4},
ForceCompact: forceCompact($2, $3, $4),
ForceMultiLine: forceMultiLine($2, $3, $4),
}
}
| expr '(' expr for_clauses_with_if_clauses_opt ')'
{
$$ = &CallExpr{
X: $1,
ListStart: $2,
List: []Expr{
&ListForExpr{
Brack: "",
Start: $2,
X: $3,
For: $4,
End: End{Pos: $5},
},
},
End: End{Pos: $5},
}
}
| expr '[' expr ']'
{
$$ = &IndexExpr{
X: $1,
IndexStart: $2,
Y: $3,
End: $4,
}
}
| expr '[' expr_opt ':' expr_opt ']'
{
$$ = &SliceExpr{
X: $1,
SliceStart: $2,
Y: $3,
Colon: $4,
Z: $5,
End: $6,
}
}
| _LAMBDA exprs ':' expr
{
$$ = &LambdaExpr{
Lambda: $1,
Var: $2,
Colon: $3,
Expr: $4,
}
}
| '-' expr %prec _UNARY { $$ = unary($1, $<tok>1, $2) }
| _NOT expr %prec _UNARY { $$ = unary($1, $<tok>1, $2) }
| '*' expr %prec _UNARY { $$ = unary($1, $<tok>1, $2) }
| expr '*' expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr '%' expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr '/' expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr '+' expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr '-' expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr '<' expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr '>' expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _EQ expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _LE expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _NE expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _GE expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr '=' expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _ADDEQ expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _IN expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _NOT _IN expr { $$ = binary($1, $2, "not in", $4) }
| expr _OR expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _AND expr { $$ = binary($1, $2, $<tok>2, $3) }
| expr _IS expr
{
if b, ok := $3.(*UnaryExpr); ok && b.Op == "not" {
$$ = binary($1, $2, "is not", b.X)
} else {
$$ = binary($1, $2, $<tok>2, $3)
}
}
| expr _IF expr _ELSE expr
{
$$ = &ConditionalExpr{
Then: $1,
IfStart: $2,
Test: $3,
ElseStart: $4,
Else: $5,
}
}
expr_opt:
{
$$ = nil
}
| expr
// comma_opt is an optional comma. If the comma is present,
// the rule's value is the position of the comma. Otherwise
// the rule's value is the zero position. Tracking this
// lets us distinguish (x) and (x,).
comma_opt:
{
$$ = Position{}
}
| ','
keyvalue:
expr ':' expr {
$$ = &KeyValueExpr{
Key: $1,
Colon: $2,
Value: $3,
}
}
keyvalues_no_comma:
keyvalue
{
$$ = []Expr{$1}
}
| keyvalues_no_comma ',' keyvalue
{
$$ = append($1, $3)
}
keyvalues:
keyvalues_no_comma
{
$$ = $1
}
| keyvalues_no_comma ','
{
$$ = $1
}
exprs:
expr
{
$$ = []Expr{$1}
}
| exprs ',' expr
{
$$ = append($1, $3)
}
exprs_opt:
{
$$, $<comma>$ = nil, Position{}
}
| exprs comma_opt
{
$$, $<comma>$ = $1, $2
}
string:
_STRING
{
$$ = &StringExpr{
Start: $1,
Value: $<str>1,
TripleQuote: $<triple>1,
End: $1.add($<tok>1),
Token: $<tok>1,
}
}
strings:
string
{
$$ = []*StringExpr{$1}
}
| strings string
{
$$ = append($1, $2)
}
ident:
_IDENT
{
$$ = &LiteralExpr{Start: $1, Token: $<tok>1}
}
idents:
ident
{
$$ = []Expr{$1}
}
| idents ',' ident
{
$$ = append($1, $3)
}
for_clause:
_FOR idents _IN expr
{
$$ = &ForClause{
For: $1,
Var: $2,
In: $3,
Expr: $4,
}
}
| _FOR '(' idents ')' _IN expr
{
$$ = &ForClause{
For: $1,
Var: $3,
In: $5,
Expr: $6,
}
}
for_clause_with_if_clauses_opt:
for_clause if_clauses_opt {
$$ = &ForClauseWithIfClausesOpt{
For: $1,
Ifs: $2,
}
}
for_clauses_with_if_clauses_opt:
for_clause_with_if_clauses_opt
{
$$ = []*ForClauseWithIfClausesOpt{$1}
}
| for_clauses_with_if_clauses_opt for_clause_with_if_clauses_opt {
$$ = append($1, $2)
}
if_clauses_opt:
{
$$ = nil
}
| if_clauses_opt _IF expr
{
$$ = append($1, &IfClause{
If: $2,
Cond: $3,
})
}
%%
// Go helper code.
// unary constructs a UnaryExpr applying the operator op,
// beginning at position pos, to the subexpression x.
func unary(pos Position, op string, x Expr) Expr {
	e := &UnaryExpr{
		OpStart: pos,
		Op:      op,
		X:       x,
	}
	return e
}
// binary constructs a BinaryExpr combining operands x and y with the
// operator op, which begins at position pos. A line break is recorded
// when y starts on a later line than the one on which x ends.
func binary(x Expr, pos Position, op string, y Expr) Expr {
	_, endOfX := x.Span()
	startOfY, _ := y.Span()
	return &BinaryExpr{
		X:         x,
		OpStart:   pos,
		Op:        op,
		LineBreak: endOfX.Line < startOfY.Line,
		Y:         y,
	}
}
// forceCompact returns the setting for the ForceCompact field for a call or tuple.
//
// NOTE 1: The field is called ForceCompact, not ForceSingleLine,
// because it only affects the formatting associated with the call or tuple syntax,
// not the formatting of the arguments. For example:
//
//	call([
//		1,
//		2,
//		3,
//	])
//
// is still a compact call even though it runs on multiple lines.
//
// In contrast the multiline form puts a linebreak after the (.
//
//	call(
//		[
//			1,
//			2,
//			3,
//		],
//	)
//
// NOTE 2: Because of NOTE 1, we cannot use start and end on the
// same line as a signal for compact mode: the formatting of an
// embedded list might move the end to a different line, which would
// then look different on rereading and cause buildifier not to be
// idempotent. Instead, we have to look at properties guaranteed
// to be preserved by the reformatting, namely that the opening
// paren and the first expression are on the same line and that
// each subsequent expression begins on the same line as the last
// one ended (no line breaks after comma).
func forceCompact(start Position, list []Expr, end Position) bool {
	if len(list) <= 1 {
		// The call or tuple will probably be compact anyway; don't force it.
		return false
	}

	// If there are any line breaks between elements, or any named
	// (non-string, non-literal) arguments, cannot force compact mode.
	// FIX: the element span variables were previously named start/end,
	// shadowing both parameters; they are renamed so the final
	// comparison against the closing-bracket position is unmistakable.
	line := start.Line
	for _, x := range list {
		elemStart, elemEnd := x.Span()
		if elemStart.Line != line {
			// Element does not begin on the line the previous one ended.
			return false
		}
		line = elemEnd.Line
		switch x.(type) {
		case *LiteralExpr, *StringExpr, *UnaryExpr:
			// ok
		default:
			return false
		}
	}
	// Compact only if the closing bracket is on the last element's line.
	return end.Line == line
}
// forceMultiLine returns the setting for the ForceMultiLine field.
func forceMultiLine(start Position, list []Expr, end Position) bool {
	switch len(list) {
	case 0:
		// Empty list: multiline only when the brackets themselves
		// sit on different lines.
		return start.Line != end.Line
	case 1:
		// Single element: multiline when either bracket is separated
		// from the element it encloses.
		elemStart, elemEnd := list[0].Span()
		return start.Line != elemStart.Line || end.Line != elemEnd.Line
	default:
		// Multiple elements will be printed multiline anyway,
		// so there is no need to force it.
		return false
	}
}

1306
vendor/github.com/bazelbuild/buildtools/build/parse.y.go generated vendored Executable file

File diff suppressed because it is too large Load Diff

614
vendor/github.com/bazelbuild/buildtools/build/print.go generated vendored Normal file
View File

@@ -0,0 +1,614 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Printing of syntax trees.
package build
import (
"bytes"
"fmt"
"strings"
)
// Format returns the formatted form of the given BUILD file.
func Format(f *File) []byte {
	var pr printer
	pr.file(f)
	return pr.Bytes()
}
// FormatString returns the string form of the given expression.
func FormatString(x Expr) string {
	pr := &printer{}
	// A whole file is printed with statement layout; anything else
	// is printed as a bare expression.
	if f, ok := x.(*File); ok {
		pr.file(f)
	} else {
		pr.expr(x, precLow)
	}
	return pr.String()
}
// A printer collects the state during printing of a file or expression.
type printer struct {
	bytes.Buffer           // output buffer
	comment      []Comment // pending end-of-line comments, flushed by newline
	margin       int       // left margin (indent), a number of spaces
	depth        int       // nesting depth inside ( ) [ ] { }
}
// printf prints to the buffer.
// The printer embeds a bytes.Buffer, so p itself is an io.Writer.
func (p *printer) printf(format string, args ...interface{}) {
	fmt.Fprintf(p, format, args...)
}
// indent returns the position on the current line, in bytes, 0-indexed —
// that is, the number of bytes printed since the last newline.
func (p *printer) indent() int {
	b := p.Bytes()
	// Distance from the end of the buffer back to the most recent '\n'.
	// With no newline, LastIndexByte returns -1 and the whole buffer
	// length is the current column — same result as the manual scan
	// this replaces.
	return len(b) - bytes.LastIndexByte(b, '\n') - 1
}
// newline ends the current line, flushing end-of-line comments.
// It must only be called when printing a newline is known to be safe:
// when not inside an expression or when p.depth > 0.
// To break a line inside an expression that might not be enclosed
// in brackets of some kind, use breakline instead.
func (p *printer) newline() {
	if len(p.comment) > 0 {
		// Flush pending suffix comments: the first stays on the current
		// line, and each subsequent one gets its own line at the margin.
		p.printf(" ")
		for i, com := range p.comment {
			if i > 0 {
				p.trim()
				p.printf("\n%*s", p.margin, "")
			}
			p.printf("%s", strings.TrimSpace(com.Token))
		}
		p.comment = p.comment[:0]
	}

	p.trim()
	p.printf("\n%*s", p.margin, "")
}
// breakline breaks the current line, inserting a continuation \ if needed.
// If no continuation \ is needed, breakline flushes end-of-line comments.
func (p *printer) breakline() {
	if p.depth > 0 {
		// Inside brackets, a plain newline is safe (and flushes comments).
		p.newline()
		return
	}
	// Not inside brackets: must end the line with a continuation \.
	// Cannot have both final \ and comments.
	p.printf(" \\\n%*s", p.margin, "")
}
// trim removes trailing spaces from the line we are about to end.
func (p *printer) trim() {
	b := p.Bytes()
	keep := len(b)
	for keep > 0 && b[keep-1] == ' ' {
		keep--
	}
	p.Truncate(keep)
}
// file formats the given file into the print buffer.
func (p *printer) file(f *File) {
	// Comments attached before the first statement.
	for _, com := range f.Before {
		p.printf("%s", strings.TrimSpace(com.Token))
		p.newline()
	}

	for i, stmt := range f.Stmt {
		switch stmt := stmt.(type) {
		case *CommentBlock:
			// comments already handled
		case *PythonBlock:
			for _, com := range stmt.Before {
				p.printf("%s", strings.TrimSpace(com.Token))
				p.newline()
			}
			p.printf("%s", stmt.Token) // includes trailing newline
		default:
			p.expr(stmt, precLow)
			p.newline()
		}

		for _, com := range stmt.Comment().After {
			p.printf("%s", strings.TrimSpace(com.Token))
			p.newline()
		}

		// Blank line between statements unless they form a compact pair
		// (see compactStmt).
		if i+1 < len(f.Stmt) && !compactStmt(stmt, f.Stmt[i+1]) {
			p.newline()
		}
	}

	// Comments attached after the last statement.
	for _, com := range f.After {
		p.printf("%s", strings.TrimSpace(com.Token))
		p.newline()
	}
}
// compactStmt reports whether the pair of statements s1, s2
// should be printed without an intervening blank line.
// We omit the blank line when both are subinclude (or load) statements
// and the second one has no leading comments.
func compactStmt(s1, s2 Expr) bool {
	if len(s2.Comment().Before) > 0 {
		return false
	}
	isLoadLike := func(x Expr) bool {
		return isCall(x, "subinclude") || isCall(x, "load")
	}
	return isLoadLike(s1) && isLoadLike(s2)
}
// isCall reports whether x is a call to a function with the given name.
func isCall(x Expr, name string) bool {
	call, ok := x.(*CallExpr)
	if !ok {
		return false
	}
	fn, ok := call.X.(*LiteralExpr)
	return ok && fn.Token == name
}
// Expression formatting.
// The expression formatter must introduce parentheses to force the
// meaning described by the parse tree. We preserve parentheses in the
// input, so extra parentheses are only needed if we have edited the tree.
//
// For example consider these expressions:
// (1) "x" "y" % foo
// (2) "x" + "y" % foo
// (3) "x" + ("y" % foo)
// (4) ("x" + "y") % foo
// When we parse (1), we represent the concatenation as an addition.
// However, if we print the addition back out without additional parens,
// as in (2), it has the same meaning as (3), which is not the original
// meaning. To preserve the original meaning we must add parens as in (4).
//
// To allow arbitrary rewrites to be formatted properly, we track full
// operator precedence while printing instead of just handling this one
// case of string concatenation.
//
// The precedences are assigned values low to high. A larger number
// binds tighter than a smaller number. All binary operators bind
// left-to-right.
const (
	precLow      = iota // lowest: no enclosing operator context
	precAssign          // = and += (see opPrec)
	precComma           // comma-separated items
	precColon           // colon, e.g. the lambda body (see *LambdaExpr in expr)
	precIn              // 'in' operator — presumably; not referenced in this chunk
	precOr              // or
	precAnd             // and
	precCmp             // < > == != <= >=
	precAdd             // + -
	precMultiply        // * / %
	precSuffix          // x.y, x[i], x[i:j], f(x), conditional expressions
	precUnary           // unary operators, including not
	precConcat          // implicit string concatenation
)
// opPrec gives the precedence for operators found in a BinaryExpr.
// NOTE: assignment is represented as a BinaryExpr in this syntax tree,
// so = and += appear here as well.
var opPrec = map[string]int{
	"=":   precAssign,
	"+=":  precAssign,
	"or":  precOr,
	"and": precAnd,
	"<":   precCmp,
	">":   precCmp,
	"==":  precCmp,
	"!=":  precCmp,
	"<=":  precCmp,
	">=":  precCmp,
	"+":   precAdd,
	"-":   precAdd,
	"*":   precMultiply,
	"/":   precMultiply,
	"%":   precMultiply,
}
// expr prints the expression v to the print buffer.
// The value outerPrec gives the precedence of the operator
// outside expr. If that operator binds tighter than v's operator,
// expr must introduce parentheses to preserve the meaning
// of the parse tree (see above).
func (p *printer) expr(v Expr, outerPrec int) {
	// Emit line-comments preceding this expression.
	// If we are in the middle of an expression but not inside ( ) [ ] { }
	// then we cannot just break the line: we'd have to end it with a \.
	// However, even then we can't emit line comments since that would
	// end the expression. This is only a concern if we have rewritten
	// the parse tree. If comments were okay before this expression in
	// the original input they're still okay now, in the absense of rewrites.
	//
	// TODO(bazel-team): Check whether it is valid to emit comments right now,
	// and if not, insert them earlier in the output instead, at the most
	// recent \n not following a \ line.
	if before := v.Comment().Before; len(before) > 0 {
		// Want to print a line comment.
		// Line comments must be at the current margin.
		p.trim()
		if p.indent() > 0 {
			// There's other text on the line. Start a new line.
			p.printf("\n")
		}
		// Re-indent to margin.
		p.printf("%*s", p.margin, "")
		for _, com := range before {
			p.printf("%s", strings.TrimSpace(com.Token))
			p.newline()
		}
	}

	// Do we introduce parentheses?
	// The result depends on the kind of expression.
	// Each expression type that might need parentheses
	// calls addParen with its own precedence.
	// If parentheses are necessary, addParen prints the
	// opening parenthesis and sets parenthesized so that
	// the code after the switch can print the closing one.
	parenthesized := false
	addParen := func(prec int) {
		if prec < outerPrec {
			p.printf("(")
			p.depth++
			parenthesized = true
		}
	}

	switch v := v.(type) {
	default:
		panic(fmt.Errorf("printer: unexpected type %T", v))

	case *LiteralExpr:
		p.printf("%s", v.Token)

	case *StringExpr:
		// If the Token is a correct quoting of Value, use it.
		// This preserves the specific escaping choices that
		// BUILD authors have made, and it also works around
		// b/7272572.
		if strings.HasPrefix(v.Token, `"`) {
			s, triple, err := unquote(v.Token)
			if s == v.Value && triple == v.TripleQuote && err == nil {
				p.printf("%s", v.Token)
				break
			}
		}

		p.printf("%s", quote(v.Value, v.TripleQuote))

	case *DotExpr:
		addParen(precSuffix)
		p.expr(v.X, precSuffix)
		p.printf(".%s", v.Name)

	case *IndexExpr:
		addParen(precSuffix)
		p.expr(v.X, precSuffix)
		p.printf("[")
		p.expr(v.Y, precLow)
		p.printf("]")

	case *KeyValueExpr:
		p.expr(v.Key, precLow)
		p.printf(": ")
		p.expr(v.Value, precLow)

	case *SliceExpr:
		// x[lo:hi], where either bound may be omitted.
		addParen(precSuffix)
		p.expr(v.X, precSuffix)
		p.printf("[")
		if v.Y != nil {
			p.expr(v.Y, precLow)
		}
		p.printf(":")
		if v.Z != nil {
			p.expr(v.Z, precLow)
		}
		p.printf("]")

	case *UnaryExpr:
		addParen(precUnary)
		if v.Op == "not" {
			p.printf("not ") // Requires a space after it.
		} else {
			p.printf("%s", v.Op)
		}
		p.expr(v.X, precUnary)

	case *LambdaExpr:
		addParen(precColon)
		p.printf("lambda ")
		for i, name := range v.Var {
			if i > 0 {
				p.printf(", ")
			}
			p.expr(name, precLow)
		}
		p.printf(": ")
		p.expr(v.Expr, precColon)

	case *BinaryExpr:
		// Precedence: use the precedence of the operator.
		// Since all binary expressions format left-to-right,
		// it is okay for the left side to reuse the same operator
		// without parentheses, so we use prec for v.X.
		// For the same reason, the right side cannot reuse the same
		// operator, or else a parse tree for a + (b + c), where the ( ) are
		// not present in the source, will format as a + b + c, which
		// means (a + b) + c. Treat the right expression as appearing
		// in a context one precedence level higher: use prec+1 for v.Y.
		//
		// Line breaks: if we are to break the line immediately after
		// the operator, introduce a margin at the current column,
		// so that the second operand lines up with the first one and
		// also so that neither operand can use space to the left.
		// If the operator is an =, indent the right side another 4 spaces.
		prec := opPrec[v.Op]
		addParen(prec)
		m := p.margin
		if v.LineBreak {
			p.margin = p.indent()
			if v.Op == "=" {
				p.margin += 4
			}
		}

		p.expr(v.X, prec)
		p.printf(" %s", v.Op)
		if v.LineBreak {
			p.breakline()
		} else {
			p.printf(" ")
		}
		p.expr(v.Y, prec+1)
		p.margin = m

	case *ParenExpr:
		p.seq("()", []Expr{v.X}, &v.End, modeParen, false, v.ForceMultiLine)

	case *CallExpr:
		addParen(precSuffix)
		p.expr(v.X, precSuffix)
		p.seq("()", v.List, &v.End, modeCall, v.ForceCompact, v.ForceMultiLine)

	case *ListExpr:
		p.seq("[]", v.List, &v.End, modeList, false, v.ForceMultiLine)

	case *SetExpr:
		p.seq("{}", v.List, &v.End, modeList, false, v.ForceMultiLine)

	case *TupleExpr:
		p.seq("()", v.List, &v.End, modeTuple, v.ForceCompact, v.ForceMultiLine)

	case *DictExpr:
		// Widen []*KeyValueExpr (or similar) to []Expr for seq.
		var list []Expr
		for _, x := range v.List {
			list = append(list, x)
		}
		p.seq("{}", list, &v.End, modeDict, false, v.ForceMultiLine)

	case *ListForExpr:
		p.listFor(v)

	case *ConditionalExpr:
		addParen(precSuffix)
		p.expr(v.Then, precSuffix)
		p.printf(" if ")
		p.expr(v.Test, precSuffix)
		p.printf(" else ")
		p.expr(v.Else, precSuffix)
	}

	// Add closing parenthesis if needed.
	if parenthesized {
		p.depth--
		p.printf(")")
	}

	// Queue end-of-line comments for printing when we
	// reach the end of the line.
	p.comment = append(p.comment, v.Comment().Suffix...)
}
// A seqMode describes a formatting mode for a sequence of values,
// like a list or call arguments. It controls comma placement and
// bracket handling in seq.
type seqMode int

const (
	_ seqMode = iota

	modeCall  // f(x)
	modeList  // [x]
	modeTuple // (x,)
	modeParen // (x)
	modeDict  // {x:y}
)
// seq formats a list of values inside a given bracket pair (brack = "()", "[]", "{}").
// The end node holds any trailing comments to be printed just before the
// closing bracket.
// The mode parameter specifies the sequence mode (see above).
// If multiLine is true, seq avoids the compact form even
// for 0- and 1-element sequences.
func (p *printer) seq(brack string, list []Expr, end *End, mode seqMode, forceCompact, forceMultiLine bool) {
	p.printf("%s", brack[:1])
	p.depth++

	// If there are line comments, force multiline
	// so we can print the comments before the closing bracket.
	for _, x := range list {
		if len(x.Comment().Before) > 0 {
			forceMultiLine = true
		}
	}
	if len(end.Before) > 0 {
		forceMultiLine = true
	}

	// Resolve possibly ambiguous call arguments explicitly
	// instead of depending on implicit resolution in logic below.
	if forceMultiLine {
		forceCompact = false
	}

	switch {
	case len(list) == 0 && !forceMultiLine:
		// Compact form: print nothing.

	case len(list) == 1 && !forceMultiLine:
		// Compact form.
		p.expr(list[0], precLow)
		// Tuple must end with comma, to mark it as a tuple.
		if mode == modeTuple {
			p.printf(",")
		}

	case forceCompact:
		// Compact form but multiple elements.
		for i, x := range list {
			if i > 0 {
				p.printf(", ")
			}
			p.expr(x, precLow)
		}

	default:
		// Multi-line form: one element per line, indented 4 spaces.
		p.margin += 4
		for i, x := range list {
			// If we are about to break the line before the first
			// element and there are trailing end-of-line comments
			// waiting to be printed, delay them and print them as
			// whole-line comments preceding that element.
			// Do this by printing a newline ourselves and positioning
			// so that the end-of-line comment, with the two spaces added,
			// will line up with the current margin.
			if i == 0 && len(p.comment) > 0 {
				p.printf("\n%*s", p.margin-2, "")
			}

			p.newline()
			p.expr(x, precLow)
			// Every element gets a trailing comma except the last
			// element of a parenthesized (x) expression.
			if mode != modeParen || i+1 < len(list) {
				p.printf(",")
			}
		}

		// Final comments.
		for _, com := range end.Before {
			p.newline()
			p.printf("%s", strings.TrimSpace(com.Token))
		}
		p.margin -= 4
		p.newline()
	}
	p.depth--
	p.printf("%s", brack[1:])
}
// listFor formats a ListForExpr (list comprehension).
// The single-line form is:
//	[x for y in z if c]
//
// and the multi-line form is:
//	[
//	    x
//	    for y in z
//	    if c
//	]
//
func (p *printer) listFor(v *ListForExpr) {
	multiLine := v.ForceMultiLine || len(v.End.Before) > 0

	// space breaks the line in multiline mode
	// or else prints a space.
	space := func() {
		if multiLine {
			p.breakline()
		} else {
			p.printf(" ")
		}
	}

	if v.Brack != "" {
		p.depth++
		p.printf("%s", v.Brack[:1])
	}

	if multiLine {
		if v.Brack != "" {
			p.margin += 4
		}
		p.newline()
	}

	// The element expression, then each for clause with its if clauses.
	p.expr(v.X, precLow)

	for _, c := range v.For {
		space()
		p.printf("for ")
		for i, name := range c.For.Var {
			if i > 0 {
				p.printf(", ")
			}
			p.expr(name, precLow)
		}
		p.printf(" in ")
		p.expr(c.For.Expr, precLow)
		p.comment = append(p.comment, c.For.Comment().Suffix...)

		for _, i := range c.Ifs {
			space()
			p.printf("if ")
			p.expr(i.Cond, precLow)
			p.comment = append(p.comment, i.Comment().Suffix...)
		}
		p.comment = append(p.comment, c.Comment().Suffix...)
	}

	if multiLine {
		// Comments pending before the closing bracket.
		for _, com := range v.End.Before {
			p.newline()
			p.printf("%s", strings.TrimSpace(com.Token))
		}
		if v.Brack != "" {
			p.margin -= 4
		}
		p.newline()
	}

	if v.Brack != "" {
		p.printf("%s", v.Brack[1:])
		p.depth--
	}
}

262
vendor/github.com/bazelbuild/buildtools/build/quote.go generated vendored Normal file
View File

@@ -0,0 +1,262 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Python quoted strings.
package build
import (
"bytes"
"fmt"
"strconv"
"strings"
)
// unesc maps single-letter chars following \ to their actual values.
var unesc = [256]byte{
	'a':  '\a',
	'b':  '\b',
	'f':  '\f',
	'n':  '\n',
	'r':  '\r',
	't':  '\t',
	'v':  '\v',
	'\\': '\\',
	'\'': '\'',
	'"':  '"',
}

// esc maps escape-worthy bytes to the char that should follow \.
var esc = [256]byte{
	'\a': 'a',
	'\b': 'b',
	'\f': 'f',
	'\n': 'n',
	'\r': 'r',
	'\t': 't',
	'\v': 'v',
	'\\': '\\',
	'\'': '\'',
	'"':  '"',
}

// notEsc is a list of characters that can follow a \ in a string value
// without having to escape the \. That is, since ( is in this list, we
// quote the Go string "foo\\(bar" as the Python literal "foo\(bar".
// This really does happen in BUILD files, especially in strings
// being used as shell arguments containing regular expressions.
const notEsc = " !#$%&()*+,-./:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ{|}~"

// unquote unquotes the quoted string, returning the actual
// string value, whether the original was triple-quoted, and
// an error describing invalid input.
func unquote(quoted string) (s string, triple bool, err error) {
	// Check for raw prefix: means don't interpret the inner \.
	raw := false
	if strings.HasPrefix(quoted, "r") {
		raw = true
		quoted = quoted[1:]
	}

	if len(quoted) < 2 {
		err = fmt.Errorf("string literal too short")
		return
	}

	if quoted[0] != '"' && quoted[0] != '\'' || quoted[0] != quoted[len(quoted)-1] {
		err = fmt.Errorf("string literal has invalid quotes")
		// BUG FIX: previously this fell through with err set and kept
		// processing the malformed literal, returning a (meaningless)
		// value alongside the error. Stop here instead.
		return
	}

	// Check for triple quoted string.
	quote := quoted[0]
	if len(quoted) >= 6 && quoted[1] == quote && quoted[2] == quote && quoted[:3] == quoted[len(quoted)-3:] {
		triple = true
		quoted = quoted[3 : len(quoted)-3]
	} else {
		quoted = quoted[1 : len(quoted)-1]
	}

	// Now quoted is the quoted data, but no quotes.
	// If we're in raw mode or there are no escapes, we're done.
	if raw || !strings.Contains(quoted, `\`) {
		s = quoted
		return
	}

	// Otherwise process quoted string.
	// Each iteration processes one escape sequence along with the
	// plain text leading up to it.
	var buf bytes.Buffer
	for {
		// Remove prefix before escape sequence.
		i := strings.Index(quoted, `\`)
		if i < 0 {
			i = len(quoted)
		}
		buf.WriteString(quoted[:i])
		quoted = quoted[i:]
		if len(quoted) == 0 {
			break
		}

		// Process escape sequence.
		if len(quoted) == 1 {
			err = fmt.Errorf(`truncated escape sequence \`)
			return
		}

		switch quoted[1] {
		default:
			// In Python, if \z (for some byte z) is not a known escape sequence
			// then it appears as literal text in the string.
			buf.WriteString(quoted[:2])
			quoted = quoted[2:]

		case '\n':
			// Ignore the escape and the line break.
			quoted = quoted[2:]

		case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '\'', '"':
			// One-char escape.
			buf.WriteByte(unesc[quoted[1]])
			quoted = quoted[2:]

		case '0', '1', '2', '3', '4', '5', '6', '7':
			// Octal escape, up to 3 digits.
			n := int(quoted[1] - '0')
			quoted = quoted[2:]
			for i := 1; i < 3; i++ {
				if len(quoted) == 0 || quoted[0] < '0' || '7' < quoted[0] {
					break
				}
				n = n*8 + int(quoted[0]-'0')
				quoted = quoted[1:]
			}
			if n >= 256 {
				// NOTE: Python silently discards the high bit,
				// so that '\541' == '\141' == 'a'.
				// Let's see if we can avoid doing that in BUILD files.
				err = fmt.Errorf(`invalid escape sequence \%03o`, n)
				return
			}
			buf.WriteByte(byte(n))

		case 'x':
			// Hexadecimal escape, exactly 2 digits.
			if len(quoted) < 4 {
				err = fmt.Errorf(`truncated escape sequence %s`, quoted)
				return
			}
			n, err1 := strconv.ParseInt(quoted[2:4], 16, 0)
			if err1 != nil {
				err = fmt.Errorf(`invalid escape sequence %s`, quoted[:4])
				return
			}
			buf.WriteByte(byte(n))
			quoted = quoted[4:]
		}
	}

	s = buf.String()
	return
}
// indexByte returns the index of the first instance of b in s, or else -1.
// It is kept for its existing callers but now delegates to the standard
// library instead of a hand-rolled byte scan.
func indexByte(s string, b byte) int {
	return strings.IndexByte(s, b)
}
// hex is a list of the hexadecimal digits, for use in quoting.
// We always print lower-case hexadecimal.
const hex = "0123456789abcdef"

// quote returns the quoted form of the string value "x".
// If triple is true, quote uses the triple-quoted form """x""".
func quote(unquoted string, triple bool) string {
	q := `"`
	if triple {
		q = `"""`
	}

	var buf bytes.Buffer
	buf.WriteString(q)

	// Emit each byte, escaping only when required.
	for i := 0; i < len(unquoted); i++ {
		c := unquoted[i]
		if c == '"' && triple && (i+1 < len(unquoted) && unquoted[i+1] != '"' || i+2 < len(unquoted) && unquoted[i+2] != '"') {
			// Can pass up to two quotes through, because they are followed by a non-quote byte.
			buf.WriteByte(c)
			if i+1 < len(unquoted) && unquoted[i+1] == '"' {
				buf.WriteByte(c)
				i++
			}
			continue
		}
		if triple && c == '\n' {
			// Can allow newline in triple-quoted string.
			buf.WriteByte(c)
			continue
		}
		if c == '\'' {
			// Can allow ' since we always use ".
			buf.WriteByte(c)
			continue
		}
		if c == '\\' {
			if i+1 < len(unquoted) && indexByte(notEsc, unquoted[i+1]) >= 0 {
				// Can pass \ through when followed by a byte that
				// known not to be a valid escape sequence and also
				// that does not trigger an escape sequence of its own.
				// Use this, because various BUILD files do.
				buf.WriteByte('\\')
				buf.WriteByte(unquoted[i+1])
				i++
				continue
			}
		}
		if esc[c] != 0 {
			// Standard backslash escape (\n, \t, ...).
			buf.WriteByte('\\')
			buf.WriteByte(esc[c])
			continue
		}
		if c < 0x20 || c >= 0x80 {
			// BUILD files are supposed to be Latin-1, so escape all control and high bytes.
			// I'd prefer to use \x here, but Blaze does not implement
			// \x in quoted strings (b/7272572).
			buf.WriteByte('\\')
			buf.WriteByte(hex[c>>6]) // actually octal but reusing hex digits 0-7.
			buf.WriteByte(hex[(c>>3)&7])
			buf.WriteByte(hex[c&7])
			/*
				buf.WriteByte('\\')
				buf.WriteByte('x')
				buf.WriteByte(hex[c>>4])
				buf.WriteByte(hex[c&0xF])
			*/
			continue
		}
		// Plain printable ASCII byte: copy through.
		buf.WriteByte(c)
		continue
	}

	buf.WriteString(q)
	return buf.String()
}

View File

@@ -0,0 +1,817 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Rewriting of high-level (not purely syntactic) BUILD constructs.
package build
import (
"path"
"regexp"
"sort"
"strings"
"github.com/bazelbuild/buildtools/tables"
)
// For debugging: flag to disable certain rewrites.
var DisableRewrites []string

// disabled reports whether the named rewrite is disabled.
func disabled(name string) bool {
	for i := range DisableRewrites {
		if DisableRewrites[i] == name {
			return true
		}
	}
	return false
}
// For debugging: allow sorting of these lists even with sorting otherwise disabled.
var AllowSort []string

// allowedSort reports whether sorting is allowed in the named context.
func allowedSort(name string) bool {
	for i := range AllowSort {
		if AllowSort[i] == name {
			return true
		}
	}
	return false
}
// Rewrite applies the high-level Buildifier rewrites to f, modifying it in place.
// If info is non-nil, Rewrite updates it with information about the rewrite.
func Rewrite(f *File, info *RewriteInfo) {
	// Allocate an info so that helpers can assume it's there.
	if info == nil {
		info = &RewriteInfo{}
	}

	for _, rw := range rewrites {
		if disabled(rw.name) {
			continue
		}
		rw.fn(f, info)
	}
}
// RewriteInfo collects information about what Rewrite did.
type RewriteInfo struct {
	EditLabel      int      // number of label strings edited
	NameCall       int      // number of calls with argument names added
	SortCall       int      // number of call argument lists sorted
	SortStringList int      // number of string lists sorted
	UnsafeSort     int      // number of unsafe string lists sorted
	Log            []string // log entries - may change
}

// String returns a space-separated summary of the rewrite categories
// that fired, e.g. "label callsort".
func (info *RewriteInfo) String() string {
	var parts []string
	if info.EditLabel > 0 {
		parts = append(parts, "label")
	}
	if info.NameCall > 0 {
		parts = append(parts, "callname")
	}
	if info.SortCall > 0 {
		parts = append(parts, "callsort")
	}
	if info.SortStringList > 0 {
		parts = append(parts, "listsort")
	}
	if info.UnsafeSort > 0 {
		parts = append(parts, "unsafesort")
	}
	return strings.Join(parts, " ")
}
// rewrites is the list of all Buildifier rewrites, in the order in which they are applied.
// The order here matters: for example, label canonicalization must happen
// before sorting lists of strings.
var rewrites = []struct {
	name string // rewrite name, as matched by DisableRewrites
	fn   func(*File, *RewriteInfo)
}{
	{"callsort", sortCallArgs},
	{"label", fixLabels},
	{"listsort", sortStringLists},
	{"multiplus", fixMultilinePlus},
}
// leaveAlone reports whether any of the nodes on the stack, or the
// final node, is marked with a comment containing "buildifier: leave-alone".
func leaveAlone(stk []Expr, final Expr) bool {
	if final != nil && leaveAlone1(final) {
		return true
	}
	for _, node := range stk {
		if leaveAlone1(node) {
			return true
		}
	}
	return false
}
// hasComment reports whether x is marked with a leading comment that,
// after being converted to lower case, contains the specified text.
func hasComment(x Expr, text string) bool {
	for _, c := range x.Comment().Before {
		if strings.Contains(strings.ToLower(c.Token), text) {
			return true
		}
	}
	return false
}

// leaveAlone1 reports whether x is marked with a comment containing
// "buildifier: leave-alone", case-insensitive.
func leaveAlone1(x Expr) bool { return hasComment(x, "buildifier: leave-alone") }

// doNotSort reports whether x is marked with a comment containing
// "do not sort", case-insensitive.
func doNotSort(x Expr) bool { return hasComment(x, "do not sort") }

// keepSorted reports whether x is marked with a comment containing
// "keep sorted", case-insensitive.
func keepSorted(x Expr) bool { return hasComment(x, "keep sorted") }
// fixLabels rewrites labels into a canonical form.
//
// First, it joins labels written as string addition, turning
// "//x" + ":y" (usually split across multiple lines) into "//x:y".
//
// Second, it removes redundant target qualifiers, turning labels like
// "//third_party/m4:m4" into "//third_party/m4" as well as ones like
// "@foo//:foo" into "@foo".
//
func fixLabels(f *File, info *RewriteInfo) {
	// joinLabel collapses a string addition "//x" + ":y" into a single
	// string literal, merging the comments of all three nodes.
	joinLabel := func(p *Expr) {
		add, ok := (*p).(*BinaryExpr)
		if !ok || add.Op != "+" {
			return
		}
		str1, ok := add.X.(*StringExpr)
		if !ok || !strings.HasPrefix(str1.Value, "//") || strings.Contains(str1.Value, " ") {
			return
		}
		str2, ok := add.Y.(*StringExpr)
		if !ok || strings.Contains(str2.Value, " ") {
			return
		}
		info.EditLabel++
		str1.Value += str2.Value

		// Deleting nodes add and str2.
		// Merge comments from add, str1, and str2 and save in str1.
		com1 := add.Comment()
		com2 := str1.Comment()
		com3 := str2.Comment()
		com1.Before = append(com1.Before, com2.Before...)
		com1.Before = append(com1.Before, com3.Before...)
		com1.Suffix = append(com1.Suffix, com2.Suffix...)
		com1.Suffix = append(com1.Suffix, com3.Suffix...)
		*str1.Comment() = *com1

		*p = str1
	}

	labelPrefix := "//"
	if tables.StripLabelLeadingSlashes {
		labelPrefix = ""
	}
	// labelRE matches label strings, e.g. @r//x/y/z:abc
	// where $1 is @r//x/y/z, $2 is @r//, $3 is r, $4 is z, $5 is abc.
	labelRE := regexp.MustCompile(`^(((?:@(\w+))?//|` + labelPrefix + `)(?:.+/)?([^:]*))(?::([^:]+))?$`)

	// shortenLabel rewrites one label string literal into canonical
	// short form, counting each performed edit in info.EditLabel.
	shortenLabel := func(v Expr) {
		str, ok := v.(*StringExpr)
		if !ok {
			return
		}
		editPerformed := false

		if tables.StripLabelLeadingSlashes && strings.HasPrefix(str.Value, "//") {
			if path.Dir(f.Path) == "." || !strings.HasPrefix(str.Value, "//:") {
				editPerformed = true
				str.Value = str.Value[2:]
			}
		}

		if tables.ShortenAbsoluteLabelsToRelative {
			thisPackage := labelPrefix + path.Dir(f.Path)
			if str.Value == thisPackage {
				editPerformed = true
				str.Value = ":" + path.Base(str.Value)
			} else if strings.HasPrefix(str.Value, thisPackage+":") {
				editPerformed = true
				str.Value = str.Value[len(thisPackage):]
			}
		}

		m := labelRE.FindStringSubmatch(str.Value)
		if m == nil {
			return
		}
		if m[4] != "" && m[4] == m[5] { // e.g. //foo:foo
			editPerformed = true
			str.Value = m[1]
		} else if m[3] != "" && m[4] == "" && m[3] == m[5] { // e.g. @foo//:foo
			editPerformed = true
			str.Value = "@" + m[3]
		}
		if editPerformed {
			info.EditLabel++
		}
	}

	// Apply both rewrites to every label-typed argument of every call.
	Walk(f, func(v Expr, stk []Expr) {
		switch v := v.(type) {
		case *CallExpr:
			if leaveAlone(stk, v) {
				return
			}
			for i := range v.List {
				if leaveAlone1(v.List[i]) {
					continue
				}
				as, ok := v.List[i].(*BinaryExpr)
				if !ok || as.Op != "=" {
					continue
				}
				key, ok := as.X.(*LiteralExpr)
				if !ok || !tables.IsLabelArg[key.Token] || tables.LabelBlacklist[callName(v)+"."+key.Token] {
					continue
				}
				if leaveAlone1(as.Y) {
					continue
				}
				if list, ok := as.Y.(*ListExpr); ok {
					for i := range list.List {
						if leaveAlone1(list.List[i]) {
							continue
						}
						joinLabel(&list.List[i])
						shortenLabel(list.List[i])
					}
				}
				if set, ok := as.Y.(*SetExpr); ok {
					for i := range set.List {
						if leaveAlone1(set.List[i]) {
							continue
						}
						joinLabel(&set.List[i])
						shortenLabel(set.List[i])
					}
				} else {
					// Scalar value (note: a ListExpr also reaches this
					// else branch, where both helpers are no-ops on it).
					joinLabel(&as.Y)
					shortenLabel(as.Y)
				}
			}
		}
	})
}
// callName returns the name of the rule being called by call.
// If the call is not to a literal rule name, callName returns "".
func callName(call *CallExpr) string {
	if rule, ok := call.X.(*LiteralExpr); ok {
		return rule.Token
	}
	return ""
}
// sortCallArgs sorts lists of named arguments to a call.
func sortCallArgs(f *File, info *RewriteInfo) {
	Walk(f, func(v Expr, stk []Expr) {
		call, ok := v.(*CallExpr)
		if !ok {
			return
		}
		if leaveAlone(stk, call) {
			return
		}
		rule := callName(call)
		if rule == "" {
			return
		}

		// Find the tail of the argument list with named arguments.
		start := len(call.List)
		for start > 0 && argName(call.List[start-1]) != "" {
			start--
		}

		// Record information about each arg into a sortable list.
		var args namedArgs
		for i, x := range call.List[start:] {
			name := argName(x)
			args = append(args, namedArg{ruleNamePriority(rule, name), name, i, x})
		}

		// Sort the list and put the args back in the new order.
		// Skip (and don't count) calls that are already sorted.
		if sort.IsSorted(args) {
			return
		}
		info.SortCall++
		sort.Sort(args)
		for i, x := range args {
			call.List[start+i] = x.expr
		}
	})
}
// ruleNamePriority maps a rule argument name to its sorting priority.
// A rule-specific entry ("rule.arg") in tables.NamePriority takes
// precedence over the generic per-argument priority.
func ruleNamePriority(rule, arg string) int {
	if pri, ok := tables.NamePriority[rule+"."+arg]; ok {
		return pri
	}
	return tables.NamePriority[arg]
}
// If x is of the form key=value, argName returns the string key.
// Otherwise argName returns "".
func argName(x Expr) string {
	as, ok := x.(*BinaryExpr)
	if !ok || as.Op != "=" {
		return ""
	}
	key, ok := as.X.(*LiteralExpr)
	if !ok {
		return ""
	}
	return key.Token
}
// A namedArg records information needed for sorting
// a named call argument into its proper position.
type namedArg struct {
	priority int    // kind of name; first sort key
	name     string // name; second sort key
	index    int    // original index; final sort key (keeps equal names stable)
	expr     Expr   // name=value argument
}
// namedArgs is a slice of namedArg that implements sort.Interface
type namedArgs []namedArg

func (x namedArgs) Len() int      { return len(x) }
func (x namedArgs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

// Less orders by priority, then by name, then by original position.
func (x namedArgs) Less(i, j int) bool {
	a, b := x[i], x[j]
	switch {
	case a.priority != b.priority:
		return a.priority < b.priority
	case a.name != b.name:
		return a.name < b.name
	default:
		return a.index < b.index
	}
}
// sortStringLists sorts lists of string literals used as specific rule arguments.
func sortStringLists(f *File, info *RewriteInfo) {
	Walk(f, func(v Expr, stk []Expr) {
		switch v := v.(type) {
		case *CallExpr:
			if leaveAlone(stk, v) {
				return
			}
			rule := callName(v)
			for _, arg := range v.List {
				if leaveAlone1(arg) {
					continue
				}
				as, ok := arg.(*BinaryExpr)
				if !ok || as.Op != "=" || leaveAlone1(as) || doNotSort(as) {
					continue
				}
				key, ok := as.X.(*LiteralExpr)
				if !ok {
					continue
				}
				// Only arguments known to be sortable lists are touched,
				// minus explicitly blacklisted rule.attr pairs.
				context := rule + "." + key.Token
				if !tables.IsSortableListArg[key.Token] || tables.SortableBlacklist[context] {
					continue
				}
				// When unsafe sorting is disabled, require the pair to be
				// whitelisted or explicitly allowed.
				if disabled("unsafesort") && !tables.SortableWhitelist[context] && !allowedSort(context) {
					continue
				}
				sortStringList(as.Y, info, context)
			}
		case *BinaryExpr:
			if disabled("unsafesort") {
				return
			}
			// "keep sorted" comment on x = list forces sorting of list.
			as := v
			if as.Op == "=" && keepSorted(as) {
				sortStringList(as.Y, info, "?")
			}
		case *KeyValueExpr:
			if disabled("unsafesort") {
				return
			}
			// "keep sorted" before key: list also forces sorting of list.
			if keepSorted(v) {
				sortStringList(v.Value, info, "?")
			}
		case *ListExpr:
			if disabled("unsafesort") {
				return
			}
			// "keep sorted" comment above first list element also forces sorting of list.
			if len(v.List) > 0 && keepSorted(v.List[0]) {
				sortStringList(v, info, "?")
			}
		}
	})
}
// SortStringList sorts x, a list of strings.
// It is the exported entry point; no rewrite statistics are recorded.
func SortStringList(x Expr) {
	sortStringList(x, nil, "")
}
// sortStringList sorts x, a list of strings.
// The list is broken by non-strings and by blank lines and comments into chunks.
// Each chunk is sorted in place.
func sortStringList(x Expr, info *RewriteInfo, context string) {
	list, ok := x.(*ListExpr)
	if !ok || len(list.List) < 2 || doNotSort(list.List[0]) {
		return
	}
	forceSort := keepSorted(list.List[0])

	// TODO(bazel-team): Decide how to recognize lists that cannot
	// be sorted. Avoiding all lists with comments avoids sorting
	// lists that say explicitly, in some form or another, why they
	// cannot be sorted. For example, many cc_test rules require
	// certain order in their deps attributes.
	if !forceSort {
		if line, _ := hasComments(list); line {
			return
		}
	}

	// Sort chunks of the list with no intervening blank lines or comments.
	for i := 0; i < len(list.List); {
		// Skip over non-string elements; they break chunks.
		if _, ok := list.List[i].(*StringExpr); !ok {
			i++
			continue
		}
		// Extend the chunk [i, j) while elements are strings with no
		// leading comments (a Before comment also breaks the chunk).
		j := i + 1
		for ; j < len(list.List); j++ {
			if str, ok := list.List[j].(*StringExpr); !ok || len(str.Before) > 0 {
				break
			}
		}
		var chunk []stringSortKey
		for index, x := range list.List[i:j] {
			chunk = append(chunk, makeSortKey(index, x.(*StringExpr)))
		}
		if !sort.IsSorted(byStringExpr(chunk)) || !isUniq(chunk) {
			if info != nil {
				info.SortStringList++
				if !tables.SortableWhitelist[context] {
					info.UnsafeSort++
					info.Log = append(info.Log, "sort:"+context)
				}
			}
			// Keep the comment block attached to the chunk's first element
			// pinned to the top of the chunk across the sort.
			before := chunk[0].x.Comment().Before
			chunk[0].x.Comment().Before = nil

			sort.Sort(byStringExpr(chunk))
			chunk = uniq(chunk)

			chunk[0].x.Comment().Before = before
			for offset, key := range chunk {
				list.List[i+offset] = key.x
			}
			// uniq may have dropped duplicates: splice out the slack.
			list.List = append(list.List[:(i+len(chunk))], list.List[j:]...)
		}
		i = j
	}
}
// uniq removes duplicates from a list, which must already be sorted.
// It edits the list in place, reusing the backing array.
func uniq(sortedList []stringSortKey) []stringSortKey {
	kept := sortedList[:0]
	for _, key := range sortedList {
		if len(kept) > 0 && key.value == kept[len(kept)-1].value {
			continue // duplicate of the previous element
		}
		kept = append(kept, key)
	}
	return kept
}
// isUniq reports whether the sorted list only contains unique elements.
func isUniq(list []stringSortKey) bool {
	for i := 1; i < len(list); i++ {
		if list[i-1].value == list[i].value {
			return false
		}
	}
	return true
}
// If stk describes a call argument like rule(arg=...), callArgName
// returns the name of that argument, formatted as "rule.arg".
func callArgName(stk []Expr) string {
	if len(stk) < 2 {
		return ""
	}
	arg := argName(stk[len(stk)-1])
	if arg == "" {
		return ""
	}
	call, ok := stk[len(stk)-2].(*CallExpr)
	if !ok {
		return ""
	}
	if rule, ok := call.X.(*LiteralExpr); ok {
		return rule.Token + "." + arg
	}
	return ""
}
// A stringSortKey records information about a single string literal to be
// sorted. The strings are first grouped into four phases: most strings,
// strings beginning with ":", strings beginning with "//", and strings
// beginning with "@". The next significant part of the comparison is the list
// of elements in the value, where elements are split at `.' and `:'. Finally
// we compare by value and break ties by original index.
type stringSortKey struct {
	phase    int      // group (0-3) derived from the value's prefix; first key
	split    []string // value split at `.' and `:' for element-wise comparison
	value    string   // the decoded string value itself
	original int      // index before sorting; final tie-break keeps sort stable
	x        Expr     // the expression carrying this string
}
// makeSortKey builds the sort key for the string literal x, which sits at
// position index in its chunk.
func makeSortKey(index int, x *StringExpr) stringSortKey {
	v := x.Value
	key := stringSortKey{
		value:    v,
		original: index,
		x:        x,
		split:    strings.Split(strings.Replace(v, ":", ".", -1), "."),
	}
	switch {
	case strings.HasPrefix(v, ":"):
		key.phase = 1
	case strings.HasPrefix(v, "//") || (tables.StripLabelLeadingSlashes && !strings.HasPrefix(v, "@")):
		key.phase = 2
	case strings.HasPrefix(v, "@"):
		key.phase = 3
	}
	return key
}
// byStringExpr implements sort.Interface for a list of stringSortKey.
type byStringExpr []stringSortKey

func (x byStringExpr) Len() int      { return len(x) }
func (x byStringExpr) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

// Less compares by phase, then element-wise over the split pieces,
// then by split length, then by raw value, and finally by original index.
func (x byStringExpr) Less(i, j int) bool {
	a, b := x[i], x[j]
	if a.phase != b.phase {
		return a.phase < b.phase
	}
	n := len(a.split)
	if len(b.split) < n {
		n = len(b.split)
	}
	for k := 0; k < n; k++ {
		if a.split[k] != b.split[k] {
			return a.split[k] < b.split[k]
		}
	}
	if len(a.split) != len(b.split) {
		return len(a.split) < len(b.split)
	}
	if a.value != b.value {
		return a.value < b.value
	}
	return a.original < b.original
}
// fixMultilinePlus turns
//
//	... +
//	[ ... ]
//
//	... +
//	call(...)
//
// into
//
//	... + [
//	    ...
//	]
//
//	... + call(
//	    ...
//	)
//
// which typically works better with our aggressively compact formatting.
func fixMultilinePlus(f *File, info *RewriteInfo) {
	// List manipulation helpers.
	// As a special case, we treat f([...]) as a list, mainly
	// for glob.

	// isList reports whether x is a list.
	var isList func(x Expr) bool
	isList = func(x Expr) bool {
		switch x := x.(type) {
		case *ListExpr:
			return true
		case *CallExpr:
			// A single-argument call counts if that argument is a list.
			if len(x.List) == 1 {
				return isList(x.List[0])
			}
		}
		return false
	}

	// isMultiLine reports whether x is a multiline list.
	var isMultiLine func(Expr) bool
	isMultiLine = func(x Expr) bool {
		switch x := x.(type) {
		case *ListExpr:
			return x.ForceMultiLine || len(x.List) > 1
		case *CallExpr:
			if x.ForceMultiLine || len(x.List) > 1 && !x.ForceCompact {
				return true
			}
			if len(x.List) == 1 {
				return isMultiLine(x.List[0])
			}
		}
		return false
	}

	// forceMultiLine tries to force the list x to use a multiline form.
	// It reports whether it was successful.
	var forceMultiLine func(Expr) bool
	forceMultiLine = func(x Expr) bool {
		switch x := x.(type) {
		case *ListExpr:
			// Already multi line?
			if x.ForceMultiLine {
				return true
			}
			// If this is a list containing a list, force the
			// inner list to be multiline instead.
			if len(x.List) == 1 && forceMultiLine(x.List[0]) {
				return true
			}
			x.ForceMultiLine = true
			return true
		case *CallExpr:
			if len(x.List) == 1 {
				return forceMultiLine(x.List[0])
			}
		}
		return false
	}

	skip := map[Expr]bool{}
	Walk(f, func(v Expr, stk []Expr) {
		if skip[v] {
			return
		}
		bin, ok := v.(*BinaryExpr)
		if !ok || bin.Op != "+" {
			return
		}

		// Found a +.
		// w + x + y + z parses as ((w + x) + y) + z,
		// so chase down the left side to make a list of
		// all the things being added together, separated
		// by the BinaryExprs that join them.
		// Mark them as "skip" so that when Walk recurses
		// into the subexpressions, we won't reprocess them.
		var all []Expr
		for {
			all = append(all, bin.Y, bin)
			bin1, ok := bin.X.(*BinaryExpr)
			if !ok || bin1.Op != "+" {
				break
			}
			bin = bin1
			skip[bin] = true
		}
		all = append(all, bin.X)

		// Because the outermost expression was the
		// rightmost one, the list is backward. Reverse it.
		for i, j := 0, len(all)-1; i < j; i, j = i+1, j-1 {
			all[i], all[j] = all[j], all[i]
		}

		// The 'all' slice is alternating addends and BinaryExpr +'s:
		//	w, +, x, +, y, +, z
		// If there are no lists involved, don't rewrite anything.
		haveList := false
		for i := 0; i < len(all); i += 2 {
			if isList(all[i]) {
				haveList = true
				break
			}
		}
		if !haveList {
			return
		}

		// Okay, there are lists.
		// Consider each + next to a line break.
		for i := 1; i < len(all); i += 2 {
			bin := all[i].(*BinaryExpr)
			if !bin.LineBreak {
				continue
			}

			// We're going to break the line after the +.
			// If it is followed by a list, force that to be
			// multiline instead.
			if forceMultiLine(all[i+1]) {
				bin.LineBreak = false
				continue
			}

			// If the previous list was multiline already,
			// don't bother with the line break after
			// the +.
			if isMultiLine(all[i-1]) {
				bin.LineBreak = false
				continue
			}
		}
	})
}
// hasComments reports whether any comments are associated with
// the list or its elements. line is true when any whole-line comment
// is present; suffix is true when any end-of-line comment is present.
func hasComments(list *ListExpr) (line, suffix bool) {
	c := list.Comment()
	line = len(c.Before) > 0 || len(c.After) > 0 || len(list.End.Before) > 0
	suffix = len(c.Suffix) > 0
	for _, elem := range list.List {
		ec := elem.Comment()
		line = line || len(ec.Before) > 0
		suffix = suffix || len(ec.Suffix) > 0
	}
	return line, suffix
}

260
vendor/github.com/bazelbuild/buildtools/build/rule.go generated vendored Normal file
View File

@@ -0,0 +1,260 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Rule-level API for inspecting and modifying a build.File syntax tree.
package build
import "strings"
// A Rule represents a single BUILD rule.
// It is a thin wrapper over the underlying call expression.
type Rule struct {
	Call *CallExpr
}
// Rules returns the rules in the file of the given kind (such as "go_library").
// If kind == "", Rules returns all rules in the file.
func (f *File) Rules(kind string) []*Rule {
	var rules []*Rule
	for _, stmt := range f.Stmt {
		call, ok := stmt.(*CallExpr)
		if !ok {
			continue
		}
		r := &Rule{call}
		if kind == "" || r.Kind() == kind {
			rules = append(rules, r)
		}
	}
	return rules
}
// RuleAt returns the rule in the file that starts at the specified line, or null if no such rule.
func (f *File) RuleAt(linenum int) *Rule {
	for _, stmt := range f.Stmt {
		call, ok := stmt.(*CallExpr)
		if !ok {
			continue
		}
		// NOTE(review): this spans only the rule-name expression (call.X),
		// so linenum matches only the lines the name occupies, not the
		// whole call body — confirm whether call.Span() was intended.
		start, end := call.X.Span()
		if start.Line <= linenum && linenum <= end.Line {
			return &Rule{call}
		}
	}
	return nil
}
// DelRules removes rules with the given kind and name from the file.
// An empty kind matches all kinds; an empty name matches all names.
// It returns the number of rules that were deleted.
func (f *File) DelRules(kind, name string) int {
	// Filter in place, reusing the backing array of f.Stmt.
	kept := f.Stmt[:0]
	for _, stmt := range f.Stmt {
		if call, ok := stmt.(*CallExpr); ok {
			r := &Rule{call}
			if (kind == "" || r.Kind() == kind) &&
				(name == "" || r.AttrString("name") == name) {
				continue // matched both filters: drop it
			}
		}
		kept = append(kept, stmt)
	}
	deleted := len(f.Stmt) - len(kept)
	f.Stmt = kept
	return deleted
}
// Kind returns the rule's kind (such as "go_library").
// The kind of the rule may be given by a literal or it may be a sequence of dot expressions that
// begins with a literal, if the call expression does not conform to either of these forms, an
// empty string will be returned
func (r *Rule) Kind() string {
	var names []string
	expr := r.Call.X
	// Peel off dot selectors from the outside in, collecting their names.
	for {
		x, ok := expr.(*DotExpr)
		if !ok {
			break
		}
		names = append(names, x.Name)
		expr = x.X
	}
	// The selector chain must bottom out in a literal identifier.
	x, ok := expr.(*LiteralExpr)
	if !ok {
		return ""
	}
	names = append(names, x.Token)
	// Reverse the elements since the deepest expression contains the leading literal
	for l, r := 0, len(names)-1; l < r; l, r = l+1, r-1 {
		names[l], names[r] = names[r], names[l]
	}
	return strings.Join(names, ".")
}
// SetKind changes rule's kind (such as "go_library").
// A dotted kind such as "a.b.c" becomes the selector chain a.b.c.
func (r *Rule) SetKind(kind string) {
	parts := strings.Split(kind, ".")
	var x Expr = &LiteralExpr{Token: parts[0]}
	for _, part := range parts[1:] {
		x = &DotExpr{X: x, Name: part}
	}
	r.Call.X = x
}
// Name returns the rule's target name (the value of its "name" attribute).
// If the rule has no target name, Name returns the empty string.
func (r *Rule) Name() string {
	return r.AttrString("name")
}
// AttrKeys returns the keys of all the rule's attributes,
// in the order they appear in the call.
func (r *Rule) AttrKeys() []string {
	var keys []string
	for _, expr := range r.Call.List {
		as, ok := expr.(*BinaryExpr)
		if !ok || as.Op != "=" {
			continue
		}
		if key, ok := as.X.(*LiteralExpr); ok {
			keys = append(keys, key.Token)
		}
	}
	return keys
}
// AttrDefn returns the BinaryExpr defining the rule's attribute with the given key.
// That is, the result is a *BinaryExpr with Op == "=".
// If the rule has no such attribute, AttrDefn returns nil.
func (r *Rule) AttrDefn(key string) *BinaryExpr {
	for _, kv := range r.Call.List {
		if as, ok := kv.(*BinaryExpr); ok && as.Op == "=" {
			if k, ok := as.X.(*LiteralExpr); ok && k.Token == key {
				return as
			}
		}
	}
	return nil
}
// Attr returns the value of the rule's attribute with the given key
// (such as "name" or "deps").
// If the rule has no such attribute, Attr returns nil.
func (r *Rule) Attr(key string) Expr {
	if as := r.AttrDefn(key); as != nil {
		return as.Y
	}
	return nil
}
// DelAttr deletes the rule's attribute with the named key.
// It returns the old value of the attribute, or nil if the attribute was not found.
func (r *Rule) DelAttr(key string) Expr {
	list := r.Call.List
	for i, kv := range list {
		as, ok := kv.(*BinaryExpr)
		if !ok || as.Op != "=" {
			continue
		}
		if k, ok := as.X.(*LiteralExpr); !ok || k.Token != key {
			continue
		}
		// Splice the matching element out of the argument list.
		r.Call.List = append(list[:i], list[i+1:]...)
		return as.Y
	}
	return nil
}
// SetAttr sets the rule's attribute with the given key to value.
// If the rule has no attribute with the key, SetAttr appends
// one to the end of the rule's attribute list.
func (r *Rule) SetAttr(key string, val Expr) {
	if as := r.AttrDefn(key); as != nil {
		as.Y = val
		return
	}
	assign := &BinaryExpr{
		X:  &LiteralExpr{Token: key},
		Op: "=",
		Y:  val,
	}
	r.Call.List = append(r.Call.List, assign)
}
// AttrLiteral returns the literal form of the rule's attribute
// with the given key (such as "cc_api_version"), only when
// that value is an identifier or number.
// If the rule has no such attribute or the attribute is not an identifier or number,
// AttrLiteral returns "".
func (r *Rule) AttrLiteral(key string) string {
	if lit, ok := r.Attr(key).(*LiteralExpr); ok {
		return lit.Token
	}
	return ""
}
// AttrString returns the value of the rule's attribute
// with the given key (such as "name"), as a string.
// If the rule has no such attribute or the attribute has a non-string value,
// AttrString returns the empty string.
func (r *Rule) AttrString(key string) string {
	if str, ok := r.Attr(key).(*StringExpr); ok {
		return str.Value
	}
	return ""
}
// AttrStrings returns the value of the rule's attribute
// with the given key (such as "srcs"), as a []string.
// If the rule has no such attribute or the attribute is not
// a list of strings, AttrStrings returns a nil slice.
func (r *Rule) AttrStrings(key string) []string {
	return Strings(r.Attr(key))
}
// Strings returns expr as a []string.
// If expr is not a list of string literals,
// Strings returns a nil slice instead.
// If expr is an empty list of string literals,
// returns a non-nil empty slice.
// (this allows differentiating between these two cases)
func Strings(expr Expr) []string {
	list, ok := expr.(*ListExpr)
	if !ok {
		return nil
	}
	values := make([]string, 0, len(list.List)) // non-nil even when empty
	for _, elem := range list.List {
		str, ok := elem.(*StringExpr)
		if !ok {
			return nil // a non-string element poisons the whole list
		}
		values = append(values, str.Value)
	}
	return values
}

423
vendor/github.com/bazelbuild/buildtools/build/syntax.go generated vendored Normal file
View File

@@ -0,0 +1,423 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package build implements parsing and printing of BUILD files.
package build
// Syntax data structure definitions.
import (
"strings"
"unicode/utf8"
)
// A Position describes the position between two bytes of input.
type Position struct {
	Line     int // line in input (starting at 1)
	LineRune int // rune in line (starting at 1)
	Byte     int // byte in input (starting at 0)
}

// add returns the position at the end of s, assuming it starts at p.
func (p Position) add(s string) Position {
	p.Byte += len(s)
	if i := strings.LastIndex(s, "\n"); i >= 0 {
		// s spans at least one newline: advance the line count and
		// restart the rune column on the content after the last newline.
		p.Line += strings.Count(s, "\n")
		p.LineRune = 1
		s = s[i+1:]
	}
	p.LineRune += utf8.RuneCountInString(s)
	return p
}
// An Expr represents an input element.
type Expr interface {
	// Span returns the start and end position of the expression,
	// excluding leading or trailing comments.
	Span() (start, end Position)

	// Comment returns the comments attached to the expression.
	// This method would normally be named 'Comments' but that
	// would interfere with embedding a type of the same name.
	Comment() *Comments
}

// A Comment represents a single # comment.
type Comment struct {
	Start  Position
	Token  string // without trailing newline
	Suffix bool   // an end of line (not whole line) comment
}

// Comments collects the comments associated with an expression.
type Comments struct {
	Before []Comment // whole-line comments before this expression
	Suffix []Comment // end-of-line comments after this expression

	// For top-level expressions only, After lists whole-line
	// comments following the expression.
	After []Comment
}

// Comment returns the receiver. This isn't useful by itself, but
// a Comments struct is embedded into all the expression
// implementation types, and this gives each of those a Comment
// method to satisfy the Expr interface.
func (c *Comments) Comment() *Comments {
	return c
}
// A File represents an entire BUILD file.
type File struct {
	Path string // file path, relative to workspace directory
	Comments
	Stmt []Expr
}

// Span returns the span from the first statement's start to the last
// statement's end; an empty file yields zero Positions.
func (x *File) Span() (start, end Position) {
	if len(x.Stmt) == 0 {
		return
	}
	start, _ = x.Stmt[0].Span()
	_, end = x.Stmt[len(x.Stmt)-1].Span()
	return start, end
}
// A CommentBlock represents a top-level block of comments separate
// from any rule.
type CommentBlock struct {
Comments
Start Position
}
func (x *CommentBlock) Span() (start, end Position) {
return x.Start, x.Start
}
// A PythonBlock represents a blob of Python code, typically a def or for loop.
type PythonBlock struct {
Comments
Start Position
Token string // raw Python code, including final newline
}
func (x *PythonBlock) Span() (start, end Position) {
return x.Start, x.Start.add(x.Token)
}
// A LiteralExpr represents a literal identifier or number.
type LiteralExpr struct {
Comments
Start Position
Token string // identifier token
}
func (x *LiteralExpr) Span() (start, end Position) {
return x.Start, x.Start.add(x.Token)
}
// A StringExpr represents a single literal string.
type StringExpr struct {
Comments
Start Position
Value string // string value (decoded)
TripleQuote bool // triple quote output
End Position
// To allow specific formatting of string literals,
// at least within our requirements, record the
// preferred form of Value. This field is a hint:
// it is only used if it is a valid quoted form for Value.
Token string
}
func (x *StringExpr) Span() (start, end Position) {
return x.Start, x.End
}
// An End represents the end of a parenthesized or bracketed expression.
// It is a place to hang comments.
type End struct {
Comments
Pos Position
}
func (x *End) Span() (start, end Position) {
return x.Pos, x.Pos.add(")")
}
// A CallExpr represents a function call expression: X(List).
type CallExpr struct {
Comments
X Expr
ListStart Position // position of (
List []Expr
End // position of )
ForceCompact bool // force compact (non-multiline) form when printing
ForceMultiLine bool // force multiline form when printing
}
func (x *CallExpr) Span() (start, end Position) {
start, _ = x.X.Span()
return start, x.End.Pos.add(")")
}
// A DotExpr represents a field selector: X.Name.
type DotExpr struct {
Comments
X Expr
Dot Position
NamePos Position
Name string
}
func (x *DotExpr) Span() (start, end Position) {
start, _ = x.X.Span()
return start, x.NamePos.add(x.Name)
}
// A ListForExpr represents a list comprehension expression: [X for ... if ...].
type ListForExpr struct {
Comments
ForceMultiLine bool // split expression across multiple lines
Brack string // "", "()", or "[]"
Start Position
X Expr
For []*ForClauseWithIfClausesOpt
End
}
func (x *ListForExpr) Span() (start, end Position) {
return x.Start, x.End.Pos.add("]")
}
// A ForClause represents a for clause in a list comprehension: for Var in Expr.
type ForClause struct {
Comments
For Position
Var []Expr
In Position
Expr Expr
}
func (x *ForClause) Span() (start, end Position) {
_, end = x.Expr.Span()
return x.For, end
}
// An IfClause represents an if clause in a list comprehension: if Cond.
type IfClause struct {
Comments
If Position
Cond Expr
}
func (x *IfClause) Span() (start, end Position) {
_, end = x.Cond.Span()
return x.If, end
}
// A ForClauseWithIfClausesOpt represents a for clause in a list comprehension followed by optional
// if expressions: for ... in ... [if ... if ...]
type ForClauseWithIfClausesOpt struct {
Comments
For *ForClause
Ifs []*IfClause
}
func (x *ForClauseWithIfClausesOpt) Span() (start, end Position) {
start, end = x.For.Span()
if len(x.Ifs) > 0 {
_, end = x.Ifs[len(x.Ifs)-1].Span()
}
return start, end
}
// A KeyValueExpr represents a dictionary entry: Key: Value.
type KeyValueExpr struct {
Comments
Key Expr
Colon Position
Value Expr
}
func (x *KeyValueExpr) Span() (start, end Position) {
start, _ = x.Key.Span()
_, end = x.Value.Span()
return start, end
}
// A DictExpr represents a dictionary literal: { List }.
type DictExpr struct {
Comments
Start Position
List []Expr // all *KeyValueExprs
Comma Position // position of trailing comma, if any
End
ForceMultiLine bool // force multiline form when printing
}
func (x *DictExpr) Span() (start, end Position) {
return x.Start, x.End.Pos.add("}")
}
// A ListExpr represents a list literal: [ List ].
type ListExpr struct {
Comments
Start Position
List []Expr
Comma Position // position of trailing comma, if any
End
ForceMultiLine bool // force multiline form when printing
}
func (x *ListExpr) Span() (start, end Position) {
return x.Start, x.End.Pos.add("]")
}
// A SetExpr represents a set literal: { List }.
type SetExpr struct {
Comments
Start Position
List []Expr
Comma Position // position of trailing comma, if any
End
ForceMultiLine bool // force multiline form when printing
}
func (x *SetExpr) Span() (start, end Position) {
return x.Start, x.End.Pos.add("}")
}
// A TupleExpr represents a tuple literal: (List)
type TupleExpr struct {
Comments
Start Position
List []Expr
Comma Position // position of trailing comma, if any
End
ForceCompact bool // force compact (non-multiline) form when printing
ForceMultiLine bool // force multiline form when printing
}
func (x *TupleExpr) Span() (start, end Position) {
return x.Start, x.End.Pos.add(")")
}
// A UnaryExpr represents a unary expression: Op X.
type UnaryExpr struct {
Comments
OpStart Position
Op string
X Expr
}
func (x *UnaryExpr) Span() (start, end Position) {
_, end = x.X.Span()
return x.OpStart, end
}
// A BinaryExpr represents a binary expression: X Op Y.
type BinaryExpr struct {
Comments
X Expr
OpStart Position
Op string
LineBreak bool // insert line break between Op and Y
Y Expr
}
func (x *BinaryExpr) Span() (start, end Position) {
start, _ = x.X.Span()
_, end = x.Y.Span()
return start, end
}
// A ParenExpr represents a parenthesized expression: (X).
type ParenExpr struct {
Comments
Start Position
X Expr
End
ForceMultiLine bool // insert line break after opening ( and before closing )
}
func (x *ParenExpr) Span() (start, end Position) {
return x.Start, x.End.Pos.add(")")
}
// A SliceExpr represents a slice expression: X[Y:Z].
type SliceExpr struct {
Comments
X Expr
SliceStart Position
Y Expr
Colon Position
Z Expr
End Position
}
func (x *SliceExpr) Span() (start, end Position) {
start, _ = x.X.Span()
return start, x.End
}
// An IndexExpr represents an index expression: X[Y].
type IndexExpr struct {
Comments
X Expr
IndexStart Position
Y Expr
End Position
}
func (x *IndexExpr) Span() (start, end Position) {
start, _ = x.X.Span()
return start, x.End
}
// A LambdaExpr represents a lambda expression: lambda Var: Expr.
type LambdaExpr struct {
Comments
Lambda Position
Var []Expr
Colon Position
Expr Expr
}
func (x *LambdaExpr) Span() (start, end Position) {
_, end = x.Expr.Span()
return x.Lambda, end
}
// ConditionalExpr represents the conditional: X if TEST else ELSE.
type ConditionalExpr struct {
Comments
Then Expr
IfStart Position
Test Expr
ElseStart Position
Else Expr
}
// Span returns the start and end position of the expression,
// excluding leading or trailing comments.
func (x *ConditionalExpr) Span() (start, end Position) {
start, _ = x.Then.Span()
_, end = x.Else.Span()
return start, end
}

132
vendor/github.com/bazelbuild/buildtools/build/walk.go generated vendored Normal file
View File

@@ -0,0 +1,132 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package build
// Walk walks the expression tree v, calling f on all subexpressions
// in a preorder traversal.
//
// The stk argument is the stack of expressions in the recursion above x,
// from outermost to innermost.
//
func Walk(v Expr, f func(x Expr, stk []Expr)) {
	var stack []Expr
	// Adapt the read-only visitor to walk1's editing signature by
	// always returning nil (no replacement).
	walk1(&v, &stack, func(x Expr, stk []Expr) Expr {
		f(x, stk)
		return nil
	})
}
// Edit walks the expression tree v, calling f on all subexpressions
// in a preorder traversal. If f returns a non-nil value, the tree is
// mutated: the returned value replaces the visited one.
//
// The stk argument is the stack of expressions in the recursion above x,
// from outermost to innermost.
//
func Edit(v Expr, f func(x Expr, stk []Expr) Expr) Expr {
	var stack []Expr
	return walk1(&v, &stack, f)
}
// walk1 is the actual implementation of Walk and Edit.
// It has the same signature and meaning as Walk,
// except that it maintains in *stack the current stack
// of nodes. Using a pointer to a slice here ensures that
// as the stack grows and shrinks the storage can be
// reused for the next growth.
func walk1(v *Expr, stack *[]Expr, f func(x Expr, stk []Expr) Expr) Expr {
	if v == nil {
		return nil
	}

	// Visit this node first (preorder); a non-nil result replaces it in place.
	if res := f(*v, *stack); res != nil {
		*v = res
	}
	*stack = append(*stack, *v)
	switch v := (*v).(type) {
	case *File:
		// Walk by index so that replacements performed by f (via Edit)
		// are stored back into v.Stmt. Ranging by value would hand walk1
		// the address of a loop-local copy, silently discarding edits
		// to top-level statements.
		for i := range v.Stmt {
			walk1(&v.Stmt[i], stack, f)
		}
	case *DotExpr:
		walk1(&v.X, stack, f)
	case *IndexExpr:
		walk1(&v.X, stack, f)
		walk1(&v.Y, stack, f)
	case *KeyValueExpr:
		walk1(&v.Key, stack, f)
		walk1(&v.Value, stack, f)
	case *SliceExpr:
		walk1(&v.X, stack, f)
		// Y and Z are optional slice bounds and may be nil.
		if v.Y != nil {
			walk1(&v.Y, stack, f)
		}
		if v.Z != nil {
			walk1(&v.Z, stack, f)
		}
	case *ParenExpr:
		walk1(&v.X, stack, f)
	case *UnaryExpr:
		walk1(&v.X, stack, f)
	case *BinaryExpr:
		walk1(&v.X, stack, f)
		walk1(&v.Y, stack, f)
	case *LambdaExpr:
		for i := range v.Var {
			walk1(&v.Var[i], stack, f)
		}
		walk1(&v.Expr, stack, f)
	case *CallExpr:
		walk1(&v.X, stack, f)
		for i := range v.List {
			walk1(&v.List[i], stack, f)
		}
	case *ListExpr:
		for i := range v.List {
			walk1(&v.List[i], stack, f)
		}
	case *SetExpr:
		for i := range v.List {
			walk1(&v.List[i], stack, f)
		}
	case *TupleExpr:
		for i := range v.List {
			walk1(&v.List[i], stack, f)
		}
	case *DictExpr:
		for i := range v.List {
			walk1(&v.List[i], stack, f)
		}
	case *ListForExpr:
		walk1(&v.X, stack, f)
		// For-clauses and if-clauses are held by pointer, so edits
		// through c and i propagate to the tree.
		for _, c := range v.For {
			for j := range c.For.Var {
				walk1(&c.For.Var[j], stack, f)
			}
			walk1(&c.For.Expr, stack, f)
			for _, i := range c.Ifs {
				walk1(&i.Cond, stack, f)
			}
		}
	case *ConditionalExpr:
		walk1(&v.Then, stack, f)
		walk1(&v.Test, stack, f)
		walk1(&v.Else, stack, f)
	}
	*stack = (*stack)[:len(*stack)-1]
	return *v
}

25
vendor/github.com/bazelbuild/buildtools/tables/BUILD generated vendored Normal file
View File

@@ -0,0 +1,25 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"jsonparser.go",
"tables.go",
],
importpath = "github.com/bazelbuild/buildtools/tables",
visibility = ["//visibility:public"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@@ -0,0 +1,62 @@
/*
Copyright 2017 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package tables
import (
"encoding/json"
"io/ioutil"
)
// Definitions mirrors the package-level rewrite tables so that they
// can be loaded from an external JSON file.
type Definitions struct {
	IsLabelArg                      map[string]bool
	LabelBlacklist                  map[string]bool
	IsSortableListArg               map[string]bool
	SortableBlacklist               map[string]bool
	SortableWhitelist               map[string]bool
	NamePriority                    map[string]int
	StripLabelLeadingSlashes        bool
	ShortenAbsoluteLabelsToRelative bool
}

// ParseJSONDefinitions reads and parses JSON table definitions from file.
func ParseJSONDefinitions(file string) (Definitions, error) {
	var defs Definitions
	data, err := ioutil.ReadFile(file)
	if err != nil {
		return defs, err
	}
	if err := json.Unmarshal(data, &defs); err != nil {
		return defs, err
	}
	return defs, nil
}
// ParseAndUpdateJSONDefinitions reads definitions from file and merges or
// overrides the values in memory.
// When merge is true the parsed tables are merged into the existing ones
// via MergeTables; otherwise they replace them wholesale via OverrideTables.
func ParseAndUpdateJSONDefinitions(file string, merge bool) error {
	definitions, err := ParseJSONDefinitions(file)
	if err != nil {
		return err
	}
	if merge {
		MergeTables(definitions.IsLabelArg, definitions.LabelBlacklist, definitions.IsSortableListArg, definitions.SortableBlacklist, definitions.SortableWhitelist, definitions.NamePriority, definitions.StripLabelLeadingSlashes, definitions.ShortenAbsoluteLabelsToRelative)
	} else {
		OverrideTables(definitions.IsLabelArg, definitions.LabelBlacklist, definitions.IsSortableListArg, definitions.SortableBlacklist, definitions.SortableWhitelist, definitions.NamePriority, definitions.StripLabelLeadingSlashes, definitions.ShortenAbsoluteLabelsToRelative)
	}
	return nil
}

View File

@@ -0,0 +1,237 @@
/*
Copyright 2016 Google Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Tables about what Buildifier can and cannot edit.
// Perhaps eventually this will be
// derived from the BUILD encyclopedia.
package tables
// IsLabelArg reports, by name, which named arguments of a rule call have a
// value that can be treated as a label or list of labels. LabelBlacklist
// holds rule-specific exceptions.
var IsLabelArg = map[string]bool{
	"app_target":         true,
	"appdir":             true,
	"base_package":       true,
	"build_deps":         true,
	"cc_deps":            true,
	"ccdeps":             true,
	"common_deps":        true,
	"compile_deps":       true,
	"compiler":           true,
	"data":               true,
	"default_visibility": true,
	"dep":                true,
	"deps":               true,
	"deps_java":          true,
	"dont_depend_on":     true,
	"env_deps":           true,
	"envscripts":         true,
	"exported_deps":      true,
	"exports":            true,
	"externs_list":       true,
	"files":              true,
	"globals":            true,
	"implementation":     true,
	"implements":         true,
	"includes":           true,
	"interface":          true,
	"jar":                true,
	"jars":               true,
	"javadeps":           true,
	"lib_deps":           true,
	"library":            true,
	"malloc":             true,
	"model":              true,
	"mods":               true,
	"module_deps":        true,
	"module_target":      true,
	"of":                 true,
	"plugins":            true,
	"proto_deps":         true,
	"proto_target":       true,
	"protos":             true,
	"resource":           true,
	"resources":          true,
	"runtime_deps":       true,
	"scope":              true,
	"shared_deps":        true,
	"similar_deps":       true,
	"source_jar":         true,
	"src":                true,
	"srcs":               true,
	"stripped_targets":   true,
	"suites":             true,
	"swigdeps":           true,
	"target":             true,
	"target_devices":     true,
	"target_platforms":   true,
	"template":           true,
	"test":               true,
	"tests":              true,
	"tests_deps":         true,
	"tool":               true,
	"tools":              true,
	"visibility":         true,
}
// LabelBlacklist is the list of "rule.argument" call arguments that cannot
// be shortened, because they are not interpreted using the same rules as
// for other labels.
var LabelBlacklist = map[string]bool{
	// Shortening this can cause visibility checks to fail.
	"package_group.includes": true,
}
// IsSortableListArg reports, by name, which named arguments of a rule call
// are considered sortable lists. SortableBlacklist holds rule-specific
// exceptions.
var IsSortableListArg = map[string]bool{
	"cc_deps":             true,
	"common_deps":         true,
	"compile_deps":        true,
	"configs":             true,
	"constraints":         true,
	"data":                true,
	"default_visibility":  true,
	"deps":                true,
	"deps_java":           true,
	"exported_deps":       true,
	"exports":             true,
	"filegroups":          true,
	"files":               true,
	"hdrs":                true,
	"imports":             true,
	"includes":            true,
	"inherits":            true,
	"javadeps":            true,
	"lib_deps":            true,
	"module_deps":         true,
	"out":                 true,
	"outs":                true,
	"packages":            true,
	"plugin_modules":      true,
	"proto_deps":          true,
	"protos":              true,
	"pubs":                true,
	"resources":           true,
	"runtime_deps":        true,
	"shared_deps":         true,
	"similar_deps":        true,
	"srcs":                true,
	"swigdeps":            true,
	"swig_includes":       true,
	"tags":                true,
	"tests":               true,
	"tools":               true,
	"to_start_extensions": true,
	"visibility":          true,
}
// SortableBlacklist records specific "rule.argument" pairs that must not be
// reordered even though the argument name is in IsSortableListArg.
var SortableBlacklist = map[string]bool{
	"genrule.outs": true,
	"genrule.srcs": true,
}
// SortableWhitelist records specific "rule.argument" pairs that are
// guaranteed to be reorderable, because bazel re-sorts the list itself
// after reading the BUILD file.
var SortableWhitelist = map[string]bool{
	"cc_inc_library.hdrs":      true,
	"cc_library.hdrs":          true,
	"java_library.srcs":        true,
	"java_library.resources":   true,
	"java_binary.srcs":         true,
	"java_binary.resources":    true,
	"java_test.srcs":           true,
	"java_test.resources":      true,
	"java_library.constraints": true,
	"java_import.constraints":  true,
}
// NamePriority maps an argument name to its sorting priority; lower values
// sort earlier, and names absent from the map sort at priority 0.
//
// NOTE(bazel-team): These are the old buildifier rules. It is likely that
// this table will change, perhaps swapping in a separate table for each
// call, derived from the order used in the Build Encyclopedia.
var NamePriority = map[string]int{
	"name":              -99,
	"gwt_name":          -98,
	"package_name":      -97,
	"visible_node_name": -96, // for boq_initial_css_modules and boq_jswire_test_suite
	"size":              -95,
	"timeout":           -94,
	"testonly":          -93,
	"src":               -92,
	"srcdir":            -91,
	"srcs":              -90,
	"out":               -89,
	"outs":              -88,
	"hdrs":              -87,
	"has_services":      -86, // before api versions, for proto
	"include":           -85, // before exclude, for glob
	"of":                -84, // for check_dependencies
	"baseline":          -83, // for searchbox_library
	// All others sort here, at 0.
	"destdir":      1,
	"exports":      2,
	"runtime_deps": 3,
	"deps":         4,
	"implementation": 5,
	"implements":     6,
	"alwayslink":     7,
}
// StripLabelLeadingSlashes controls a label-rewriting option; it can be set
// via OverrideTables/MergeTables or a JSON definitions file. NOTE(review):
// presumably enables stripping the leading "//" from labels — confirm at
// the usage sites in the build package.
var StripLabelLeadingSlashes = false

// ShortenAbsoluteLabelsToRelative controls a label-rewriting option; it can
// be set via OverrideTables/MergeTables or a JSON definitions file.
// NOTE(review): presumably rewrites absolute labels within the current
// package as relative ":name" labels — confirm at the usage sites.
var ShortenAbsoluteLabelsToRelative = false
// OverrideTables allows a user of the build package to override the
// special-case rules. The user-provided tables replace the built-in tables
// wholesale; no merging with the previous contents is performed.
func OverrideTables(labelArg, blacklist, sortableListArg, sortBlacklist, sortWhitelist map[string]bool, namePriority map[string]int, stripLabelLeadingSlashes, shortenAbsoluteLabelsToRelative bool) {
	// Install the replacement tables.
	IsLabelArg, LabelBlacklist = labelArg, blacklist
	IsSortableListArg = sortableListArg
	SortableBlacklist, SortableWhitelist = sortBlacklist, sortWhitelist
	NamePriority = namePriority
	// Replace the boolean options as well.
	StripLabelLeadingSlashes = stripLabelLeadingSlashes
	ShortenAbsoluteLabelsToRelative = shortenAbsoluteLabelsToRelative
}
// MergeTables allows a user of the build package to override the
// special-case rules. The user-provided tables are merged into the
// built-in tables: map entries from the arguments are copied over the
// existing entries, and the boolean options are OR-ed with the current
// values, so merging can enable them but never disable them.
func MergeTables(labelArg, blacklist, sortableListArg, sortBlacklist, sortWhitelist map[string]bool, namePriority map[string]int, stripLabelLeadingSlashes, shortenAbsoluteLabelsToRelative bool) {
	// The built-in tables may have been replaced with nil maps by an
	// earlier OverrideTables call (e.g. from a JSON file that omitted some
	// keys); writing to a nil map panics, so the helper allocates first.
	IsLabelArg = mergeStringBoolMap(IsLabelArg, labelArg)
	LabelBlacklist = mergeStringBoolMap(LabelBlacklist, blacklist)
	IsSortableListArg = mergeStringBoolMap(IsSortableListArg, sortableListArg)
	SortableBlacklist = mergeStringBoolMap(SortableBlacklist, sortBlacklist)
	SortableWhitelist = mergeStringBoolMap(SortableWhitelist, sortWhitelist)
	if NamePriority == nil {
		NamePriority = make(map[string]int, len(namePriority))
	}
	for k, v := range namePriority {
		NamePriority[k] = v
	}
	StripLabelLeadingSlashes = stripLabelLeadingSlashes || StripLabelLeadingSlashes
	ShortenAbsoluteLabelsToRelative = shortenAbsoluteLabelsToRelative || ShortenAbsoluteLabelsToRelative
}

// mergeStringBoolMap copies every entry of src into dst, allocating dst
// first if it is nil, and returns the resulting map.
func mergeStringBoolMap(dst, src map[string]bool) map[string]bool {
	if dst == nil {
		dst = make(map[string]bool, len(src))
	}
	for k, v := range src {
		dst[k] = v
	}
	return dst
}