// BUILD file parser.
// This is a yacc grammar. Its lexer is in lex.go.
//
// For a good introduction to writing yacc grammars, see
// Kernighan and Pike's book The Unix Programming Environment.
//
// The definitive yacc manual is
// Stephen C. Johnson and Ravi Sethi, "Yacc: A Parser Generator",
// online at http://plan9.bell-labs.com/sys/doc/yacc.pdf.

%{
package build
%}

// The generated parser puts these fields in a struct named yySymType.
// (The name %union is historical, but it is inaccurate for Go.)
%union {
	// input tokens
	tok    string   // raw input syntax
	str    string   // decoding of quoted string
	pos    Position // position of token
	triple bool     // was string triple quoted?

	// partial syntax trees
	expr     Expr
	exprs    []Expr
	string   *StringExpr
	strings  []*StringExpr
	ifstmt   *IfStmt
	loadarg  *struct{from Ident; to Ident}
	loadargs []*struct{from Ident; to Ident}

	// supporting information
	comma    Position // position of trailing comma in list, if present
	lastStmt Expr     // most recent rule, to attach line comments to
}

// These declarations set the type for a $ reference ($$, $1, $2, ...)
// based on the kind of symbol it refers to. Other fields can be referred
// to explicitly, as in $<tok>1.
//
// %token is for input tokens generated by the lexer.
// %type is for higher-level grammar rules defined here.
//
// It is possible to put multiple tokens per line, but it is easier to
// keep ordered using a sparser one-per-line list.

%token <pos> '%'
%token <pos> '('
%token <pos> ')'
%token <pos> '*'
%token <pos> '+'
%token <pos> ','
%token <pos> '-'
%token <pos> '.'
%token <pos> '/'
%token <pos> ':'
%token <pos> '<'
%token <pos> '='
%token <pos> '>'
%token <pos> '['
%token <pos> ']'
%token <pos> '{'
%token <pos> '}'
%token <pos> '|'
%token <pos> '&'
%token <pos> '^'
%token <pos> '~'

// By convention, yacc token names are all caps.
// However, we do not want to export them from the Go package
// we are creating, so prefix them all with underscores.

%token <pos> _AUGM      // augmented assignment
%token <pos> _AND       // keyword and
%token <pos> _COMMENT   // top-level # comment
%token <pos> _EOF       // end of file
%token <pos> _EQ        // operator ==
%token <pos> _FOR       // keyword for
%token <pos> _GE        // operator >=
%token <pos> _IDENT     // non-keyword identifier
%token <pos> _NUMBER    // number
%token <pos> _IF        // keyword if
%token <pos> _ELSE      // keyword else
%token <pos> _ELIF      // keyword elif
%token <pos> _IN        // keyword in
%token <pos> _IS        // keyword is
%token <pos> _LAMBDA    // keyword lambda
%token <pos> _LOAD      // keyword load
%token <pos> _LE        // operator <=
%token <pos> _NE        // operator !=
%token <pos> _STAR_STAR // operator **
%token <pos> _INT_DIV   // operator //
%token <pos> _BIT_LSH   // bitwise operator <<
%token <pos> _BIT_RSH   // bitwise operator >>
%token <pos> _NOT       // keyword not
%token <pos> _OR        // keyword or
%token <pos> _STRING    // quoted string
%token <pos> _DEF       // keyword def
%token <pos> _RETURN    // keyword return
%token <pos> _PASS      // keyword pass
%token <pos> _BREAK     // keyword break
%token <pos> _CONTINUE  // keyword continue
%token <pos> _INDENT    // indentation
%token <pos> _UNINDENT  // unindentation
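
// Note: all tokens above are declared with the <pos> field, so a plain $N on a
// token yields its Position; the raw text or decoded value of a token has to be
// selected explicitly, e.g. $<tok>1 in the `ident` rule or $<str>1 and
// $<triple>1 in the `string` rule below.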

%type <pos>      comma_opt
%type <expr>     argument
%type <exprs>    arguments
%type <exprs>    arguments_opt
%type <expr>     parameter
%type <exprs>    parameters
%type <exprs>    parameters_opt
%type <expr>     test
%type <expr>     test_opt
%type <exprs>    tests_opt
%type <expr>     primary_expr
%type <expr>     expr
%type <expr>     expr_opt
%type <exprs>    tests
%type <exprs>    exprs
%type <exprs>    exprs_opt
%type <expr>     loop_vars
%type <expr>     for_clause
%type <exprs>    for_clause_with_if_clauses_opt
%type <exprs>    for_clauses_with_if_clauses_opt
%type <expr>     ident
%type <expr>     number
%type <exprs>    stmts
%type <exprs>    stmt                     // a simple_stmt or a for/if/def block
%type <expr>     block_stmt               // a single for/if/def statement
%type <expr>     if_else_block            // a complete if-elif-else block
%type <ifstmt>   if_chain                 // an elif-elif-else chain
%type <pos>      elif                     // `elif` or `else if` token(s)
%type <exprs>    simple_stmt              // One or many small_stmts on one line, e.g. 'a = f(x); return str(a)'
%type <expr>     small_stmt               // A single statement, e.g. 'a = f(x)'
%type <exprs>    small_stmts_continuation // A sequence of `';' small_stmt`
%type <expr>     keyvalue
%type <exprs>    keyvalues
%type <exprs>    keyvalues_no_comma
%type <string>   string
%type <strings>  strings
%type <exprs>    suite
%type <exprs>    comments
%type <loadarg>  load_argument
%type <loadargs> load_arguments

// Operator precedence.
// Operators listed lower in the table bind tighter.

// We tag rules with this fake, low precedence to indicate
// that when the rule is involved in a shift/reduce
// conflict, we prefer that the parser shift (try for a longer parse).
// Shifting is the default resolution anyway, but stating it explicitly
// silences yacc's warning for that specific case.
%left ShiftInstead

%left '\n'
%left _ASSERT
// '=' and augmented assignments have the lowest precedence
// e.g. "x = a if c > 0 else 'bar'"
// followed by
// 'if' and 'else' which have lower precedence than all other operators.
// e.g. "a, b if c > 0 else 'foo'" is either a tuple of (a,b) or 'foo'
// and not a tuple of "(a, (b if ... ))"
%left '=' _AUGM
%left _IF _ELSE _ELIF
%left ','
%left ':'
%left _IS
%left _OR
%left _AND
%left '<' '>' _EQ _NE _LE _GE _NOT _IN
%left '|'
%left '^'
%left '&'
%left _BIT_LSH _BIT_RSH
%left '+' '-'
%left '*' '/' '%' _INT_DIV
%left '.' '[' '('
%right _UNARY
%left _STRING

%%

// Grammar rules.
//
// A note on names: if foo is a rule, then foos is a sequence of foos
// (with interleaved commas or other syntax as appropriate)
// and foo_opt is an optional foo.

file:
	stmts _EOF
	{
		yylex.(*input).file = &File{Stmt: $1}
		return 0
	}

suite:
	'\n' comments _INDENT stmts _UNINDENT
	{
		statements := $4
		if $2 != nil {
			// $2 can only contain *CommentBlock objects, each of them contains a non-empty After slice
			cb := $2[len($2)-1].(*CommentBlock)
			// $4 can't be empty and can't start with a comment
			stmt := $4[0]
			start, _ := stmt.Span()
			if start.Line - cb.After[len(cb.After)-1].Start.Line == 1 {
				// The first statement of $4 starts on the next line after the last comment of $2.
				// Attach the last comment to the first statement
				stmt.Comment().Before = cb.After
				$2 = $2[:len($2)-1]
			}
			statements = append($2, $4...)
		}
		$$ = statements
		$<lastStmt>$ = $<lastStmt>4
	}
|	simple_stmt linebreaks_opt
	{
		$$ = $1
	}

linebreaks_opt:
|	linebreaks_opt '\n'
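
// comments collects runs of top-level comment lines into CommentBlock
// statements. The $<lastStmt> bookkeeping below records which node freshly
// scanned comment lines should attach to; a blank line clears it, so the
// next comment line starts a new block.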

comments:
	{
		$$ = nil
		$<lastStmt>$ = nil
	}
|	comments _COMMENT '\n'
	{
		$$ = $1
		$<lastStmt>$ = $<lastStmt>1
		if $<lastStmt>$ == nil {
			cb := &CommentBlock{Start: $2}
			$$ = append($$, cb)
			$<lastStmt>$ = cb
		}
		com := $<lastStmt>$.Comment()
		com.After = append(com.After, Comment{Start: $2, Token: $<tok>2})
	}
|	comments '\n'
	{
		$$ = $1
		$<lastStmt>$ = nil
	}

stmts:
	{
		$$ = nil
		$<lastStmt>$ = nil
	}
|	stmts stmt
	{
		// If this statement follows a comment block,
		// attach the comments to the statement.
		if cb, ok := $<lastStmt>1.(*CommentBlock); ok {
			$$ = append($1[:len($1)-1], $2...)
			$2[0].Comment().Before = cb.After
			$<lastStmt>$ = $<lastStmt>2
			break
		}

		// Otherwise add to list.
		$$ = append($1, $2...)
		$<lastStmt>$ = $<lastStmt>2

		// Consider this input:
		//
		//	foo()
		//	# bar
		//	baz()
		//
		// If we've just parsed baz(), the # bar is attached to
		// foo() as an After comment. Make it a Before comment
		// for baz() instead.
		if x := $<lastStmt>1; x != nil {
			com := x.Comment()
			// stmt is never empty
			$2[0].Comment().Before = com.After
			com.After = nil
		}
	}
|	stmts '\n'
	{
		// Blank line; sever last rule from future comments.
		$$ = $1
		$<lastStmt>$ = nil
	}
|	stmts _COMMENT '\n'
	{
		$$ = $1
		$<lastStmt>$ = $<lastStmt>1
		if $<lastStmt>$ == nil {
			cb := &CommentBlock{Start: $2}
			$$ = append($$, cb)
			$<lastStmt>$ = cb
		}
		com := $<lastStmt>$.Comment()
		com.After = append(com.After, Comment{Start: $2, Token: $<tok>2})
	}

stmt:
	simple_stmt
	{
		$$ = $1
		$<lastStmt>$ = $1[len($1)-1]
	}
|	block_stmt
	{
		$$ = []Expr{$1}
		$<lastStmt>$ = $1
		if cbs := extractTrailingComments($1); len(cbs) > 0 {
			$$ = append($$, cbs...)
			$<lastStmt>$ = cbs[len(cbs)-1]
			if $<lastStmt>1 == nil {
				$<lastStmt>$ = nil
			}
		}
	}

block_stmt:
	_DEF _IDENT '(' parameters_opt ')' ':' suite
	{
		$$ = &DefStmt{
			Function: Function{
				StartPos: $1,
				Params: $4,
				Body: $7,
			},
			Name: $<tok>2,
			ColonPos: $6,
			ForceCompact: forceCompact($3, $4, $5),
			ForceMultiLine: forceMultiLine($3, $4, $5),
		}
		$<lastStmt>$ = $<lastStmt>7
	}
|	_FOR loop_vars _IN expr ':' suite
	{
		$$ = &ForStmt{
			For: $1,
			Vars: $2,
			X: $4,
			Body: $6,
		}
		$<lastStmt>$ = $<lastStmt>6
	}
|	if_else_block
	{
		$$ = $1
		$<lastStmt>$ = $<lastStmt>1
	}

// One or several if-elif-elif statements
if_chain:
	_IF expr ':' suite
	{
		$$ = &IfStmt{
			If: $1,
			Cond: $2,
			True: $4,
		}
		$<lastStmt>$ = $<lastStmt>4
	}
|	if_chain elif expr ':' suite
	{
		$$ = $1
		inner := $1
		for len(inner.False) == 1 {
			inner = inner.False[0].(*IfStmt)
		}
		inner.ElsePos = End{Pos: $2}
		inner.False = []Expr{
			&IfStmt{
				If: $2,
				Cond: $3,
				True: $5,
			},
		}
		$<lastStmt>$ = $<lastStmt>5
	}

// A complete if-elif-elif-else chain
if_else_block:
	if_chain
|	if_chain _ELSE ':' suite
	{
		$$ = $1
		inner := $1
		for len(inner.False) == 1 {
			inner = inner.False[0].(*IfStmt)
		}
		inner.ElsePos = End{Pos: $2}
		inner.False = $4
		$<lastStmt>$ = $<lastStmt>4
	}

elif:
	_ELSE _IF
|	_ELIF

simple_stmt:
	small_stmt small_stmts_continuation semi_opt '\n'
	{
		$$ = append([]Expr{$1}, $2...)
		$<lastStmt>$ = $$[len($$)-1]
	}

small_stmts_continuation:
	{
		$$ = []Expr{}
	}
|	small_stmts_continuation ';' small_stmt
	{
		$$ = append($1, $3)
	}

small_stmt:
	expr %prec ShiftInstead
|	_RETURN expr
	{
		$$ = &ReturnStmt{
			Return: $1,
			Result: $2,
		}
	}
|	_RETURN
	{
		$$ = &ReturnStmt{
			Return: $1,
		}
	}
|	expr '=' expr
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	expr _AUGM expr
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	_PASS
	{
		$$ = &BranchStmt{
			Token: $<tok>1,
			TokenPos: $1,
		}
	}
|	_BREAK
	{
		$$ = &BranchStmt{
			Token: $<tok>1,
			TokenPos: $1,
		}
	}
|	_CONTINUE
	{
		$$ = &BranchStmt{
			Token: $<tok>1,
			TokenPos: $1,
		}
	}

semi_opt:
|	';'
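
// primary_expr covers identifiers, literals, dot/index/slice expressions,
// calls, list/dict/set displays, comprehensions, and load statements.
// For example (illustrative),
//
//	load(":defs.bzl", "rule_a", rule_b = "other_rule")
//
// yields a LoadStmt whose To idents are rule_a and rule_b and whose From
// idents are rule_a and other_rule (see the load_argument rule further down).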

primary_expr:
	ident
|	number
|	primary_expr '.' _IDENT
	{
		$$ = &DotExpr{
			X: $1,
			Dot: $2,
			NamePos: $<pos>3,
			Name: $<tok>3,
		}
	}
|	_LOAD '(' string ',' load_arguments comma_opt ')'
	{
		load := &LoadStmt{
			Load: $1,
			Module: $3,
			Rparen: End{Pos: $7},
			ForceCompact: $1.Line == $7.Line,
		}
		for _, arg := range $5 {
			load.From = append(load.From, &arg.from)
			load.To = append(load.To, &arg.to)
		}
		$$ = load
	}
|	primary_expr '(' arguments_opt ')'
	{
		$$ = &CallExpr{
			X: $1,
			ListStart: $2,
			List: $3,
			End: End{Pos: $4},
			ForceCompact: forceCompact($2, $3, $4),
			ForceMultiLine: forceMultiLine($2, $3, $4),
		}
	}
|	primary_expr '[' expr ']'
	{
		$$ = &IndexExpr{
			X: $1,
			IndexStart: $2,
			Y: $3,
			End: $4,
		}
	}
|	primary_expr '[' expr_opt ':' test_opt ']'
	{
		$$ = &SliceExpr{
			X: $1,
			SliceStart: $2,
			From: $3,
			FirstColon: $4,
			To: $5,
			End: $6,
		}
	}
|	primary_expr '[' expr_opt ':' test_opt ':' test_opt ']'
	{
		$$ = &SliceExpr{
			X: $1,
			SliceStart: $2,
			From: $3,
			FirstColon: $4,
			To: $5,
			SecondColon: $6,
			Step: $7,
			End: $8,
		}
	}
|	strings %prec ShiftInstead
	{
		if len($1) == 1 {
			$$ = $1[0]
			break
		}
		$$ = $1[0]
		for _, x := range $1[1:] {
			_, end := $$.Span()
			$$ = binary($$, end, "+", x)
		}
	}
|	'[' tests_opt ']'
	{
		$$ = &ListExpr{
			Start: $1,
			List: $2,
			End: End{Pos: $3},
			ForceMultiLine: forceMultiLine($1, $2, $3),
		}
	}
|	'[' test for_clauses_with_if_clauses_opt ']'
	{
		$$ = &Comprehension{
			Curly: false,
			Lbrack: $1,
			Body: $2,
			Clauses: $3,
			End: End{Pos: $4},
			ForceMultiLine: forceMultiLineComprehension($1, $2, $3, $4),
		}
	}
|	'{' keyvalue for_clauses_with_if_clauses_opt '}'
	{
		$$ = &Comprehension{
			Curly: true,
			Lbrack: $1,
			Body: $2,
			Clauses: $3,
			End: End{Pos: $4},
			ForceMultiLine: forceMultiLineComprehension($1, $2, $3, $4),
		}
	}
|	'{' keyvalues '}'
	{
		$$ = &DictExpr{
			Start: $1,
			List: $2,
			End: End{Pos: $3},
			ForceMultiLine: forceMultiLine($1, $2, $3),
		}
	}
|	'{' tests comma_opt '}'  // TODO: remove, not supported
	{
		$$ = &SetExpr{
			Start: $1,
			List: $2,
			End: End{Pos: $4},
			ForceMultiLine: forceMultiLine($1, $2, $4),
		}
	}
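// A parenthesized expression or tuple: `(x)` (a single element with no
// trailing comma recorded by tests_opt) is a ParenExpr, while `(x,)` and
// `(x, y)` are parsed as a TupleExpr.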
|	'(' tests_opt ')'
	{
		if len($2) == 1 && $<comma>2.Line == 0 {
			// Just a parenthesized expression, not a tuple.
			$$ = &ParenExpr{
				Start: $1,
				X: $2[0],
				End: End{Pos: $3},
				ForceMultiLine: forceMultiLine($1, $2, $3),
			}
		} else {
			$$ = &TupleExpr{
				Start: $1,
				List: $2,
				End: End{Pos: $3},
				ForceCompact: forceCompact($1, $2, $3),
				ForceMultiLine: forceMultiLine($1, $2, $3),
			}
		}
	}

arguments_opt:
	{
		$$ = nil
	}
|	arguments comma_opt
	{
		$$ = $1
	}

arguments:
	argument
	{
		$$ = []Expr{$1}
	}
|	arguments ',' argument
	{
		$$ = append($1, $3)
	}

argument:
	test
|	ident '=' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	'*' test
	{
		$$ = unary($1, $<tok>1, $2)
	}
|	_STAR_STAR test
	{
		$$ = unary($1, $<tok>1, $2)
	}

load_arguments:
	load_argument
	{
		$$ = []*struct{from Ident; to Ident}{$1}
	}
|	load_arguments ',' load_argument
	{
		$1 = append($1, $3)
		$$ = $1
	}

load_argument:
	string
	{
		start := $1.Start.add("'")
		if $1.TripleQuote {
			start = start.add("''")
		}
		$$ = &struct{from Ident; to Ident}{
			from: Ident{
				Name: $1.Value,
				NamePos: start,
			},
			to: Ident{
				Name: $1.Value,
				NamePos: start,
			},
		}
	}
|	ident '=' string
	{
		start := $3.Start.add("'")
		if $3.TripleQuote {
			start = start.add("''")
		}
		$$ = &struct{from Ident; to Ident}{
			from: Ident{
				Name: $3.Value,
				NamePos: start,
			},
			to: *$1.(*Ident),
		}
	}

parameters_opt:
	{
		$$ = nil
	}
|	parameters comma_opt
	{
		$$ = $1
	}

parameters:
	parameter
	{
		$$ = []Expr{$1}
	}
|	parameters ',' parameter
	{
		$$ = append($1, $3)
	}

parameter:
	ident
|	ident '=' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	'*' ident
	{
		$$ = unary($1, $<tok>1, $2)
	}
|	'*'
	{
		$$ = unary($1, $<tok>1, nil)
	}
|	_STAR_STAR ident
	{
		$$ = unary($1, $<tok>1, $2)
	}

expr:
	test
|	expr ',' test
	{
		tuple, ok := $1.(*TupleExpr)
		if !ok || !tuple.NoBrackets {
			tuple = &TupleExpr{
				List: []Expr{$1},
				NoBrackets: true,
				ForceCompact: true,
				ForceMultiLine: false,
			}
		}
		tuple.List = append(tuple.List, $3)
		$$ = tuple
	}

expr_opt:
	{
		$$ = nil
	}
|	expr

exprs:
	expr
	{
		$$ = []Expr{$1}
	}
|	exprs ',' expr
	{
		$$ = append($1, $3)
	}

exprs_opt:
	{
		$$ = nil
	}
|	exprs comma_opt
	{
		$$ = $1
	}

test:
	primary_expr
|	_LAMBDA exprs_opt ':' expr  // TODO: remove, not supported
	{
		$$ = &LambdaExpr{
			Function: Function{
				StartPos: $1,
				Params: $2,
				Body: []Expr{$4},
			},
		}
	}
|	_NOT test %prec _UNARY
	{
		$$ = unary($1, $<tok>1, $2)
	}
|	'-' test %prec _UNARY
	{
		$$ = unary($1, $<tok>1, $2)
	}
|	'+' test %prec _UNARY
	{
		$$ = unary($1, $<tok>1, $2)
	}
|	'~' test %prec _UNARY
	{
		$$ = unary($1, $<tok>1, $2)
	}
|	test '*' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '%' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '/' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _INT_DIV test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '+' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '-' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '<' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '>' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _EQ test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _LE test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _NE test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _GE test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _IN test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _NOT _IN test
	{
		$$ = binary($1, $2, "not in", $4)
	}
|	test _OR test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _AND test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '|' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '&' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test '^' test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _BIT_LSH test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _BIT_RSH test
	{
		$$ = binary($1, $2, $<tok>2, $3)
	}
|	test _IS test
	{
		if b, ok := $3.(*UnaryExpr); ok && b.Op == "not" {
			$$ = binary($1, $2, "is not", b.X)
		} else {
			$$ = binary($1, $2, $<tok>2, $3)
		}
	}
|	test _IF test _ELSE test
	{
		$$ = &ConditionalExpr{
			Then: $1,
			IfStart: $2,
			Test: $3,
			ElseStart: $4,
			Else: $5,
		}
	}

tests:
	test
	{
		$$ = []Expr{$1}
	}
|	tests ',' test
	{
		$$ = append($1, $3)
	}

test_opt:
	{
		$$ = nil
	}
|	test

tests_opt:
	{
		$$, $<comma>$ = nil, Position{}
	}
|	tests comma_opt
	{
		$$, $<comma>$ = $1, $2
	}

// comma_opt is an optional comma. If the comma is present,
// the rule's value is the position of the comma. Otherwise
// the rule's value is the zero position. Tracking this
// lets us distinguish (x) and (x,).
comma_opt:
	{
		$$ = Position{}
	}
|	','

keyvalue:
	test ':' test
	{
		$$ = &KeyValueExpr{
			Key: $1,
			Colon: $2,
			Value: $3,
		}
	}

keyvalues_no_comma:
	keyvalue
	{
		$$ = []Expr{$1}
	}
|	keyvalues_no_comma ',' keyvalue
	{
		$$ = append($1, $3)
	}

keyvalues:
	{
		$$ = nil
	}
|	keyvalues_no_comma
	{
		$$ = $1
	}
|	keyvalues_no_comma ','
	{
		$$ = $1
	}

loop_vars:
	primary_expr
|	loop_vars ',' primary_expr
	{
		tuple, ok := $1.(*TupleExpr)
		if !ok || !tuple.NoBrackets {
			tuple = &TupleExpr{
				List: []Expr{$1},
				NoBrackets: true,
				ForceCompact: true,
				ForceMultiLine: false,
			}
		}
		tuple.List = append(tuple.List, $3)
		$$ = tuple
	}

string:
	_STRING
	{
		$$ = &StringExpr{
			Start: $1,
			Value: $<str>1,
			TripleQuote: $<triple>1,
			End: $1.add($<tok>1),
			Token: $<tok>1,
		}
	}

strings:
	string
	{
		$$ = []*StringExpr{$1}
	}
|	strings string
	{
		$$ = append($1, $2)
	}

ident:
	_IDENT
	{
		$$ = &Ident{NamePos: $1, Name: $<tok>1}
	}

number:
	_NUMBER
	{
		$$ = &LiteralExpr{Start: $1, Token: $<tok>1}
	}

for_clause:
	_FOR loop_vars _IN test
	{
		$$ = &ForClause{
			For: $1,
			Vars: $2,
			In: $3,
			X: $4,
		}
	}

for_clause_with_if_clauses_opt:
	for_clause
	{
		$$ = []Expr{$1}
	}
|	for_clause_with_if_clauses_opt _IF test
	{
		$$ = append($1, &IfClause{
			If: $2,
			Cond: $3,
		})
	}

for_clauses_with_if_clauses_opt:
	for_clause_with_if_clauses_opt
	{
		$$ = $1
	}
|	for_clauses_with_if_clauses_opt for_clause_with_if_clauses_opt
	{
		$$ = append($1, $2...)
	}

%%

// Go helper code.

// unary returns a unary expression with the given
// position, operator, and subexpression.
func unary(pos Position, op string, x Expr) Expr {
	return &UnaryExpr{
		OpStart: pos,
		Op: op,
		X: x,
	}
}

// binary returns a binary expression with the given
// operands, position, and operator.
func binary(x Expr, pos Position, op string, y Expr) Expr {
	_, xend := x.Span()
	ystart, _ := y.Span()

	switch op {
	case "=", "+=", "-=", "*=", "/=", "//=", "%=", "|=":
		return &AssignExpr{
			LHS: x,
			OpPos: pos,
			Op: op,
			LineBreak: xend.Line < ystart.Line,
			RHS: y,
		}
	}

	return &BinaryExpr{
		X: x,
		OpStart: pos,
		Op: op,
		LineBreak: xend.Line < ystart.Line,
		Y: y,
	}
}

// isSimpleExpression returns whether an expression is simple and allowed to exist in
// compact forms of sequences.
// The formal criteria are the following: an expression is considered simple if it's
// a literal (variable, string or a number), a literal with a unary operator or an empty sequence.
func isSimpleExpression(expr *Expr) bool {
	switch x := (*expr).(type) {
	case *LiteralExpr, *StringExpr, *Ident:
		return true
	case *UnaryExpr:
		_, literal := x.X.(*LiteralExpr)
		_, ident := x.X.(*Ident)
		return literal || ident
	case *ListExpr:
		return len(x.List) == 0
	case *TupleExpr:
		return len(x.List) == 0
	case *DictExpr:
		return len(x.List) == 0
	case *SetExpr:
		return len(x.List) == 0
	default:
		return false
	}
}
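
// Illustrative note: under the criteria above, `x`, `"s"`, `-1`, and `[]`
// are simple expressions, while `f(x)`, `[1]`, and `a + b` are not.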

// forceCompact returns the setting for the ForceCompact field for a call or tuple.
//
// NOTE 1: The field is called ForceCompact, not ForceSingleLine,
// because it only affects the formatting associated with the call or tuple syntax,
// not the formatting of the arguments. For example:
//
//	call([
//		1,
//		2,
//		3,
//	])
//
// is still a compact call even though it runs on multiple lines.
//
// In contrast the multiline form puts a linebreak after the (.
//
//	call(
//		[
//			1,
//			2,
//			3,
//		],
//	)
//
// NOTE 2: Because of NOTE 1, we cannot use start and end on the
// same line as a signal for compact mode: the formatting of an
// embedded list might move the end to a different line, which would
// then look different on rereading and cause buildifier not to be
// idempotent. Instead, we have to look at properties guaranteed
// to be preserved by the reformatting, namely that the opening
// paren and the first expression are on the same line and that
// each subsequent expression begins on the same line as the last
// one ended (no line breaks after comma).
func forceCompact(start Position, list []Expr, end Position) bool {
	if len(list) <= 1 {
		// The call or tuple will probably be compact anyway; don't force it.
		return false
	}

	// If there are any named arguments or non-string, non-literal
	// arguments, cannot force compact mode.
	line := start.Line
	for _, x := range list {
		start, end := x.Span()
		if start.Line != line {
			return false
		}
		line = end.Line
		if !isSimpleExpression(&x) {
			return false
		}
	}
	return end.Line == line
}

// forceMultiLine returns the setting for the ForceMultiLine field.
func forceMultiLine(start Position, list []Expr, end Position) bool {
	if len(list) > 1 {
		// The call will be multiline anyway, because it has multiple elements. Don't force it.
		return false
	}

	if len(list) == 0 {
		// Empty list: use position of brackets.
		return start.Line != end.Line
	}

	// Single-element list.
	// Check whether opening bracket is on different line than beginning of
	// element, or closing bracket is on different line than end of element.
	elemStart, elemEnd := list[0].Span()
	return start.Line != elemStart.Line || end.Line != elemEnd.Line
}

// forceMultiLineComprehension returns the setting for the ForceMultiLine field for a comprehension.
func forceMultiLineComprehension(start Position, expr Expr, clauses []Expr, end Position) bool {
	// Return true if there's at least one line break between start, expr, each clause, and end
	exprStart, exprEnd := expr.Span()
	if start.Line != exprStart.Line {
		return true
	}
	previousEnd := exprEnd
	for _, clause := range clauses {
		clauseStart, clauseEnd := clause.Span()
		if previousEnd.Line != clauseStart.Line {
			return true
		}
		previousEnd = clauseEnd
	}
	return previousEnd.Line != end.Line
}

// extractTrailingComments extracts trailing comments of an indented block starting with the first
// comment line with indentation less than the block indentation.
// The comments can either belong to CommentBlock statements or to the last non-comment statement
// as After-comments.
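// For example (illustrative input):
//
//	def f():
//	    pass
//	    # indented like the body: stays attached to the body
//	# dedented: pulled out of the block and re-attached after it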
func extractTrailingComments(stmt Expr) []Expr {
	body := getLastBody(stmt)
	var comments []Expr
	if body != nil && len(*body) > 0 {
		// Get the current indentation level
		start, _ := (*body)[0].Span()
		indentation := start.LineRune

		// Find the last non-comment statement
		lastNonCommentIndex := -1
		for i, stmt := range *body {
			if _, ok := stmt.(*CommentBlock); !ok {
				lastNonCommentIndex = i
			}
		}
		if lastNonCommentIndex == -1 {
			return comments
		}

		// Iterate over the trailing comments, find the first comment line that's not indented enough,
		// dedent it and all the following comments.
		for i := lastNonCommentIndex; i < len(*body); i++ {
			stmt := (*body)[i]
			if comment := extractDedentedComment(stmt, indentation); comment != nil {
				// This comment and all the following CommentBlock statements are to be extracted.
				comments = append(comments, comment)
				comments = append(comments, (*body)[i+1:]...)
				*body = (*body)[:i+1]
				// If the current statement is a CommentBlock statement without any comment lines
				// it should be removed too.
				if i > lastNonCommentIndex && len(stmt.Comment().After) == 0 {
					*body = (*body)[:i]
				}
			}
		}
	}
	return comments
}

// extractDedentedComment extracts the first comment line from `stmt` whose indentation is smaller
// than `indentation`, along with all following comment lines, and returns them in a newly created
// CommentBlock statement.
func extractDedentedComment(stmt Expr, indentation int) Expr {
	for i, line := range stmt.Comment().After {
		// line.Start.LineRune == 0 can't exist in parsed files, it indicates that the comment line
		// has been added by an AST modification. Don't take such lines into account.
		if line.Start.LineRune > 0 && line.Start.LineRune < indentation {
			// This and all the following lines should be dedented
			cb := &CommentBlock{
				Start: line.Start,
				Comments: Comments{After: stmt.Comment().After[i:]},
			}
			stmt.Comment().After = stmt.Comment().After[:i]
			return cb
		}
	}
	return nil
}

// getLastBody returns the last body of a block statement (the only body for For- and DefStmt
// objects, the last in an if-elif-else chain).
func getLastBody(stmt Expr) *[]Expr {
	switch block := stmt.(type) {
	case *DefStmt:
		return &block.Body
	case *ForStmt:
		return &block.Body
	case *IfStmt:
		if len(block.False) == 0 {
			return &block.True
		} else if len(block.False) == 1 {
			if next, ok := block.False[0].(*IfStmt); ok {
				// Recursively find the last block of the chain
				return getLastBody(next)
			}
		}
		return &block.False
	}
	return nil
}