From 0124633b09d4212ec8bad21552e76a1c20bb227b Mon Sep 17 00:00:00 2001
From: Christophe Kamphaus
Date: Sat, 28 Apr 2018 01:12:26 +0200
Subject: [PATCH 1/8] Test cases for #693

---
 tests/switch.c | 88 +++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 87 insertions(+), 1 deletion(-)

diff --git a/tests/switch.c b/tests/switch.c
index ad5691295..ebbec3a67 100644
--- a/tests/switch.c
+++ b/tests/switch.c
@@ -11,6 +11,7 @@
 // CompoundStmt with 12 children.
 
 #include <stdio.h>
+#include <stdbool.h>
 #include "tests.h"
 
 void match_a_single_case()
@@ -125,6 +126,42 @@ void fallthrough_several_midway_default()
     }
 }
 
+void goto_label(bool use_goto)
+{
+    for (;;) {
+        switch (0)
+        {
+        case 3:
+            continue;
+        case 0:
+            if (use_goto) {
+                goto LABEL;
+                fail("code should not reach here");
+            } else if (false) {
+                goto LABELX;
+                goto LABELY;
+                fail("code should not reach here");
+            }
+            /* other comment */
+            // some comment
+            /* fallthrough */
+        LABELY:
+        case 4:
+        LABEL:
+        case 1:
+            pass(__func__);
+            break;
+        case 2:
+            ;
+        LABELX:
+        default:
+            fail("code should not reach here");
+            break;
+        }
+        break;
+    }
+}
+
 void scoped_match_a_single_case()
 {
     switch (1)
@@ -278,6 +315,51 @@ void scoped_fallthrough_several_midway_default()
     }
 }
 
+
+void scoped_goto_label(bool use_goto)
+{
+    for (;;) {
+        switch (0)
+        {
+        case 3:
+        {
+            continue;
+        }
+        case 0:
+        {
+            if (use_goto) {
+                goto LABEL;
+                fail("code should not reach here");
+            } else if (false) {
+                goto LABELX;
+                goto LABELY;
+                fail("code should not reach here");
+            }
+            /* other comment */
+            // some comment
+            /* fallthrough */
+        }
+        LABELY: {}
+        case 4: {}
+        LABEL: {}
+        case 1:
+        {
+            pass(__func__);
+            break;
+        }
+        case 2:
+        {}
+        LABELX: {}
+        default:
+        {
+            fail("code should not reach here");
+            break;
+        }
+        }
+        break;
+    }
+}
+
 typedef struct I67 I67;
 struct I67{
     int x,y;
@@ -376,7 +458,7 @@ void switch_without_input()
 
 int main()
 {
-    plan(33);
+    plan(37);
 
     match_a_single_case();
     fallthrough_to_next_case();
@@ -384,6 +466,8 @@ int main()
     match_default();
     fallthrough_several_cases_including_default();
    fallthrough_several_midway_default();
+    goto_label(false);
+    goto_label(true);
 
     // For each of the tests above there will be identical cases that use scopes
     // for the case statements.
@@ -393,6 +477,8 @@ int main()
     scoped_match_default();
     scoped_fallthrough_several_cases_including_default();
     scoped_fallthrough_several_midway_default();
+    scoped_goto_label(false);
+    scoped_goto_label(true);
 
     switch_issue67();
     empty_switch();
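A note between the two patches on the shape of the change that follows: the new tests place C labels (LABEL, LABELX, LABELY) between switch cases, which Go's switch statement cannot express directly. The transpiler change in the next patch moves every case body behind a generated label after the switch, reduces each case to a goto, and turns break into a goto to a generated end label (the SW_GENERATED_LABEL_ identifiers produced by handleLabelCases). The hand-written Go sketch below only illustrates that target shape; the names lowered, SW_1, SW_2, SW_DEFAULT and SW_END are invented for this example and are not the transpiler's actual output.

package main

import "fmt"

// lowered mimics what a labelled C switch becomes after the rewrite:
// the switch only dispatches via goto, the case bodies live below it,
// "break" is a goto to the end label, and a case without a break
// (case 2 here) simply falls into the next body, as in C.
func lowered(a int) string {
	out := ""
	switch a {
	case 1:
		goto SW_1
	case 2:
		goto SW_2
	default:
		goto SW_DEFAULT
	}
SW_1:
	out = "one"
	goto SW_END // was "break;" in the C source
SW_2:
	out = "two, then " // no break in C, so control falls through
SW_DEFAULT:
	out += "default"
SW_END:
	return out
}

func main() {
	fmt.Println(lowered(1)) // one
	fmt.Println(lowered(2)) // two, then default
	fmt.Println(lowered(9)) // default
}

Keeping one goto per case preserves C's fallthrough and label semantics without fighting Go's stricter switch rules; the actual patch additionally rewrites nested breaks with astutil.Apply, as shown below.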
From 5a4977bf372330fb54fc241321ecc90790dd6555 Mon Sep 17 00:00:00 2001
From: Christophe Kamphaus
Date: Sat, 28 Apr 2018 01:13:35 +0200
Subject: [PATCH 2/8] Transpile switch label clause.

Fixes #693
---
 transpiler/switch.go                          |  188 +-
 .../x/tools/go/ast/astutil/enclosing.go       |  627 ++++++
 .../x/tools/go/ast/astutil/enclosing_test.go  |  195 ++
 .../x/tools/go/ast/astutil/imports.go         |  470 +++++
 .../x/tools/go/ast/astutil/imports_test.go    | 1818 +++++++++++++++++
 .../x/tools/go/ast/astutil/rewrite.go         |  477 +++++
 .../x/tools/go/ast/astutil/rewrite_test.go    |  248 +++
 .../golang.org/x/tools/go/ast/astutil/util.go |   14 +
 8 files changed, 4016 insertions(+), 21 deletions(-)
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports_test.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go
 create mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go

diff --git a/transpiler/switch.go b/transpiler/switch.go
index 2fe0e01ea..eb5eac50f 100644
--- a/transpiler/switch.go
+++ b/transpiler/switch.go
@@ -3,13 +3,14 @@ package transpiler
 
 import (
+    "fmt"
     goast "go/ast"
     "go/token"
 
-    "fmt"
-
     "github.com/elliotchance/c2go/ast"
     "github.com/elliotchance/c2go/program"
+    "github.com/elliotchance/c2go/util"
+    "golang.org/x/tools/go/ast/astutil"
 )
 
 func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
@@ -50,14 +51,16 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
             cn := body.ChildNodes[i]
             cs, ok1 := cn.(*ast.CaseStmt)
             ds, ok2 := cn.(*ast.DefaultStmt)
-            if !ok1 && !ok2 {
+            ls, ok3 := cn.(*ast.LabelStmt)
+            if !ok1 && !ok2 && !ok3 {
                 // Do not consider a node which is not a case or default statement here
                 continue
             }
             lastCn := cn.Children()[len(cn.Children())-1]
             _, isCase := lastCn.(*ast.CaseStmt)
             _, isDefault := lastCn.(*ast.DefaultStmt)
-            if isCase || isDefault {
+            _, isLabel := lastCn.(*ast.LabelStmt)
+            if isCase || isDefault || isLabel {
                 // Insert lastCn before next case in body (https://github.com/golang/go/wiki/SliceTricks)
                 body.ChildNodes = append(body.ChildNodes, &ast.CompoundStmt{})
                 copy(body.ChildNodes[i+2:], body.ChildNodes[i+1:])
@@ -76,6 +79,9 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
                 if ok2 {
                     ds.ChildNodes = ds.ChildNodes[:len(ds.ChildNodes)-1]
                 }
+                if ok3 {
+                    ls.ChildNodes = ls.ChildNodes[:len(ls.ChildNodes)-1]
+                }
             }
         }
     }
@@ -99,11 +105,15 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
         }
     }
 
+    hasLabelCase := false
     // Move element inside CompoundStmt
     for i := 0; i < len(body.Children()); i++ {
         switch body.Children()[i].(type) {
         case *ast.CaseStmt, *ast.DefaultStmt:
             // do nothing
+        case *ast.LabelStmt:
+            hasLabelCase = true
+            // do nothing else
         default:
             if i != 0 {
                 lastStmt := body.Children()[i-1].Children()
@@ -155,7 +165,11 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
     // return
     //
     for i := range cases {
-        body := cases[i].Body
+        cs, ok := cases[i].(*goast.CaseClause)
+        if !ok {
+            continue
+        }
+        body := cs.Body
         if len(body) != 2 {
             continue
         }
@@ -165,7 +179,7 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
         }
         if !isFallThrough {
             if len(body) > 1 {
-                cases[i].Body = body
+                cs.Body = body
             }
             continue
         }
@@ -174,17 +188,17 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
             if vv, ok := v.List[len(v.List)-1].(*goast.BranchStmt); ok {
                 if vv.Tok == token.BREAK {
                     if isFallThrough {
-                        cases[i].Body = append(v.List[:len(v.List)-1])
+                        cs.Body = append(v.List[:len(v.List)-1])
                         continue
                     }
                 }
             }
             if _, ok := v.List[len(v.List)-1].(*goast.ReturnStmt); ok {
-                cases[i].Body = body[:len(body)-1]
+                cs.Body = body[:len(body)-1]
                 continue
             }
         } else {
-            cases[i].Body = []goast.Stmt{body[1]}
+            cs.Body = []goast.Stmt{body[1]}
         }
     }
@@ -200,6 +214,11 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
         stmts = append(stmts, singleCase)
     }
 
+    if hasLabelCase {
+        stmts, newPost = handleLabelCases(cases, p)
+        preStmts, postStmts = combinePreAndPostStmts(preStmts, newPost, []goast.Stmt{}, postStmts)
+    }
+
     return &goast.SwitchStmt{
         Tag: condition,
         Body: &goast.BlockStmt{
@@ -209,7 +228,7 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) (
 }
 
 func normalizeSwitchCases(body *ast.CompoundStmt, p *program.Program) (
-    _ []*goast.CaseClause, preStmts []goast.Stmt, postStmts []goast.Stmt, err error) {
+    _ []goast.Stmt, preStmts []goast.Stmt, postStmts []goast.Stmt, err error) {
     // The body of a switch has a non uniform structure. For example:
     //
     // switch a {
@@ -254,16 +273,16 @@ func normalizeSwitchCases(body *ast.CompoundStmt, p *program.Program) (
     //
     // During this translation we also remove 'break' or append a 'fallthrough'.
 
-    cases := []*goast.CaseClause{}
+    cases := []goast.Stmt{}
     caseEndedWithBreak := false
 
     for _, x := range body.Children() {
         switch c := x.(type) {
-        case *ast.CaseStmt, *ast.DefaultStmt:
+        case *ast.CaseStmt, *ast.DefaultStmt, *ast.LabelStmt:
             var newPre, newPost []goast.Stmt
             cases, newPre, newPost, err = appendCaseOrDefaultToNormalizedCases(cases, c, caseEndedWithBreak, p)
             if err != nil {
-                return []*goast.CaseClause{}, nil, nil, err
+                return []goast.Stmt{}, nil, nil, err
             }
             caseEndedWithBreak = false
@@ -276,7 +295,7 @@ func normalizeSwitchCases(body *ast.CompoundStmt, p *program.Program) (
             var newPre, newPost []goast.Stmt
             stmt, newPre, newPost, err = transpileToStmt(x, p)
             if err != nil {
-                return []*goast.CaseClause{}, nil, nil, err
+                return []goast.Stmt{}, nil, nil, err
             }
             preStmts = append(preStmts, newPre...)
             preStmts = append(preStmts, stmt)
@@ -287,20 +306,27 @@ func normalizeSwitchCases(body *ast.CompoundStmt, p *program.Program) (
     return cases, preStmts, postStmts, nil
 }
 
-func appendCaseOrDefaultToNormalizedCases(cases []*goast.CaseClause,
+func appendCaseOrDefaultToNormalizedCases(cases []goast.Stmt,
     stmt ast.Node, caseEndedWithBreak bool, p *program.Program) (
-    []*goast.CaseClause, []goast.Stmt, []goast.Stmt, error) {
+    []goast.Stmt, []goast.Stmt, []goast.Stmt, error) {
     preStmts := []goast.Stmt{}
     postStmts := []goast.Stmt{}
 
     if len(cases) > 0 && !caseEndedWithBreak {
-        cases[len(cases)-1].Body = append(cases[len(cases)-1].Body, &goast.BranchStmt{
-            Tok: token.FALLTHROUGH,
-        })
+        if cs, ok := cases[len(cases)-1].(*goast.CaseClause); ok {
+            cs.Body = append(cs.Body, &goast.BranchStmt{
+                Tok: token.FALLTHROUGH,
+            })
+        }
+        if ls, ok := cases[len(cases)-1].(*goast.LabeledStmt); ok {
+            ls.Stmt = &goast.BranchStmt{
+                Tok: token.FALLTHROUGH,
+            }
+        }
     }
     caseEndedWithBreak = false
 
-    var singleCase *goast.CaseClause
+    var singleCase goast.Stmt
     var err error
     var newPre []goast.Stmt
     var newPost []goast.Stmt
@@ -311,6 +337,9 @@ func appendCaseOrDefaultToNormalizedCases(cases []goast.Stmt,
 
     case *ast.DefaultStmt:
         singleCase, err = transpileDefaultStmt(c, p)
+
+    case *ast.LabelStmt:
+        singleCase, newPre, newPost, err = transpileLabelStmt(c, p)
     }
 
     if singleCase != nil {
@@ -320,7 +349,7 @@ func appendCaseOrDefaultToNormalizedCases(cases []goast.Stmt,
     preStmts, postStmts = combinePreAndPostStmts(preStmts, postStmts, newPre, newPost)
 
     if err != nil {
-        return []*goast.CaseClause{}, nil, nil, err
+        return []goast.Stmt{}, nil, nil, err
     }
 
     return cases, preStmts, postStmts, nil
 }
@@ -360,3 +389,120 @@ func transpileDefaultStmt(n *ast.DefaultStmt, p *program.Program) (*goast.CaseCl
         Body: stmts,
     }, nil
 }
+
+func handleLabelCases(cases []goast.Stmt, p *program.Program) (newCases []goast.Stmt, postStmts []goast.Stmt) {
+    // In C a switch can have labels before a case.
+    // Go does not support this.
+    // To make it work we translate the switch cases as labels to blocks appended to the switch
+    // For example:
+    //
+    // switch a {
+    // case 1:
+    //   foo();
+    //   break;
+    // LABEL:
+    // case 2:
+    //   bar();
+    // default:
+    //   baz();
+    // }
+    //
+    // is transpiled as:
+    //
+    // switch a {
+    // case 1:
+    //   goto SW_1_1
+    // case 2:
+    //   goto SW_1_2
+    // default:
+    //   goto SW_1_3
+    // }
+    // SW_1_1:
+    // foo()
+    // goto SW_1_END
+    // LABEL:
+    // ;
+    // SW_1_2:
+    // bar()
+    // SW_1_3:
+    // baz()
+    // SW_1_END:
+    // ;
+    swEndLabel := p.GetNextIdentifier("SW_GENERATED_LABEL_")
+    postStmts = append(postStmts, &goast.BranchStmt{
+        Label: util.NewIdent(swEndLabel),
+        Tok:   token.GOTO,
+    })
+    for i, x := range cases {
+        switch c := x.(type) {
+        case *goast.CaseClause:
+            caseLabel := p.GetNextIdentifier("SW_GENERATED_LABEL_")
+
+            if len(c.Body) == 0 {
+                c.Body = append(c.Body, &goast.BranchStmt{
+                    Tok: token.BREAK,
+                })
+            }
+            var isFallThrough bool
+            // Remove fallthrough
+            if v, ok := c.Body[len(c.Body)-1].(*goast.BranchStmt); ok {
+                isFallThrough = (v.Tok == token.FALLTHROUGH)
+                c.Body = c.Body[:len(c.Body)-1]
+            }
+            if len(c.Body) == 0 {
+                c.Body = append(c.Body, &goast.EmptyStmt{})
+            }
+
+            // Replace break's with goto swEndLabel
+            astutil.Apply(c, nil, func(cursor *astutil.Cursor) bool {
+                if cursor == nil {
+                    return true
+                }
+                node := cursor.Node()
+                if bs, ok := node.(*goast.BranchStmt); ok {
+                    if bs.Tok == token.BREAK {
+                        cursor.Replace(&goast.BranchStmt{
+                            Label: util.NewIdent(swEndLabel),
+                            Tok:   token.GOTO,
+                        })
+                    }
+                }
+                return true
+            })
+            body := c.Body
+
+            // append caseLabel label followed by case body
+            postStmts = append(postStmts, &goast.LabeledStmt{
+                Label: util.NewIdent(caseLabel),
+                Stmt:  body[0],
+            })
+            body = body[1:]
+            postStmts = append(postStmts, body...)
+
+            // If not last case && no fallthrough goto swEndLabel
+            if i != len(cases)-1 && !isFallThrough {
+                postStmts = append(postStmts, &goast.BranchStmt{
+                    Label: util.NewIdent(swEndLabel),
+                    Tok:   token.GOTO,
+                })
+            }
+
+            // In switch case we goto caseLabel
+            c.Body = []goast.Stmt{
+                &goast.BranchStmt{
+                    Label: util.NewIdent(caseLabel),
+                    Tok:   token.GOTO,
+                },
+            }
+            newCases = append(newCases, c)
+        case *goast.LabeledStmt:
+            c.Stmt = &goast.EmptyStmt{}
+            postStmts = append(postStmts, c)
+        }
+    }
+    postStmts = append(postStmts, &goast.LabeledStmt{
+        Label: util.NewIdent(swEndLabel),
+        Stmt:  &goast.EmptyStmt{},
+    })
+    return
+}
diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
new file mode 100644
index 000000000..6b7052b89
--- /dev/null
+++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go
@@ -0,0 +1,627 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package astutil
+
+// This file defines utilities for working with source positions.
+
+import (
+    "fmt"
+    "go/ast"
+    "go/token"
+    "sort"
+)
+
+// PathEnclosingInterval returns the node that encloses the source
+// interval [start, end), and all its ancestors up to the AST root.
+//
+// The definition of "enclosing" used by this function considers
+// additional whitespace abutting a node to be enclosed by it.
+// In this example:
+//
+//   z := x + y // add them
+//        <-A->
+//       <----B----->
+//
+// the ast.BinaryExpr(+) node is considered to enclose interval B
+// even though its [Pos()..End()) is actually only interval A.
+// This behaviour makes user interfaces more tolerant of imperfect +// input. +// +// This function treats tokens as nodes, though they are not included +// in the result. e.g. PathEnclosingInterval("+") returns the +// enclosing ast.BinaryExpr("x + y"). +// +// If start==end, the 1-char interval following start is used instead. +// +// The 'exact' result is true if the interval contains only path[0] +// and perhaps some adjacent whitespace. It is false if the interval +// overlaps multiple children of path[0], or if it contains only +// interior whitespace of path[0]. +// In this example: +// +// z := x + y // add them +// <--C--> <---E--> +// ^ +// D +// +// intervals C, D and E are inexact. C is contained by the +// z-assignment statement, because it spans three of its children (:=, +// x, +). So too is the 1-char interval D, because it contains only +// interior whitespace of the assignment. E is considered interior +// whitespace of the BlockStmt containing the assignment. +// +// Precondition: [start, end) both lie within the same file as root. +// TODO(adonovan): return (nil, false) in this case and remove precond. +// Requires FileSet; see loader.tokenFileContainsPos. +// +// Postcondition: path is never nil; it always contains at least 'root'. +// +func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { + // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging + + // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). + var visit func(node ast.Node) bool + visit = func(node ast.Node) bool { + path = append(path, node) + + nodePos := node.Pos() + nodeEnd := node.End() + + // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging + + // Intersect [start, end) with interval of node. + if start < nodePos { + start = nodePos + } + if end > nodeEnd { + end = nodeEnd + } + + // Find sole child that contains [start, end). + children := childrenOf(node) + l := len(children) + for i, child := range children { + // [childPos, childEnd) is unaugmented interval of child. + childPos := child.Pos() + childEnd := child.End() + + // [augPos, augEnd) is whitespace-augmented interval of child. + augPos := childPos + augEnd := childEnd + if i > 0 { + augPos = children[i-1].End() // start of preceding whitespace + } + if i < l-1 { + nextChildPos := children[i+1].Pos() + // Does [start, end) lie between child and next child? + if start >= augEnd && end <= nextChildPos { + return false // inexact match + } + augEnd = nextChildPos // end of following whitespace + } + + // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", + // i, augPos, augEnd, start, end) // debugging + + // Does augmented child strictly contain [start, end)? + if augPos <= start && end <= augEnd { + _, isToken := child.(tokenNode) + return isToken || visit(child) + } + + // Does [start, end) overlap multiple children? + // i.e. left-augmented child contains start + // but LR-augmented child does not contain end. + if start < childEnd && end > augEnd { + break + } + } + + // No single child contained [start, end), + // so node is the result. Is it exact? + + // (It's tempting to put this condition before the + // child loop, but it gives the wrong result in the + // case where a node (e.g. ExprStmt) and its sole + // child have equal intervals.) 
+ if start == nodePos && end == nodeEnd { + return true // exact match + } + + return false // inexact: overlaps multiple children + } + + if start > end { + start, end = end, start + } + + if start < root.End() && end > root.Pos() { + if start == end { + end = start + 1 // empty interval => interval of size 1 + } + exact = visit(root) + + // Reverse the path: + for i, l := 0, len(path); i < l/2; i++ { + path[i], path[l-1-i] = path[l-1-i], path[i] + } + } else { + // Selection lies within whitespace preceding the + // first (or following the last) declaration in the file. + // The result nonetheless always includes the ast.File. + path = append(path, root) + } + + return +} + +// tokenNode is a dummy implementation of ast.Node for a single token. +// They are used transiently by PathEnclosingInterval but never escape +// this package. +// +type tokenNode struct { + pos token.Pos + end token.Pos +} + +func (n tokenNode) Pos() token.Pos { + return n.pos +} + +func (n tokenNode) End() token.Pos { + return n.end +} + +func tok(pos token.Pos, len int) ast.Node { + return tokenNode{pos, pos + token.Pos(len)} +} + +// childrenOf returns the direct non-nil children of ast.Node n. +// It may include fake ast.Node implementations for bare tokens. +// it is not safe to call (e.g.) ast.Walk on such nodes. +// +func childrenOf(n ast.Node) []ast.Node { + var children []ast.Node + + // First add nodes for all true subtrees. + ast.Inspect(n, func(node ast.Node) bool { + if node == n { // push n + return true // recur + } + if node != nil { // push child + children = append(children, node) + } + return false // no recursion + }) + + // Then add fake Nodes for bare tokens. + switch n := n.(type) { + case *ast.ArrayType: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Elt.End(), len("]"))) + + case *ast.AssignStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.BasicLit: + children = append(children, + tok(n.ValuePos, len(n.Value))) + + case *ast.BinaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.BlockStmt: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("}"))) + + case *ast.BranchStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.CallExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + if n.Ellipsis != 0 { + children = append(children, tok(n.Ellipsis, len("..."))) + } + + case *ast.CaseClause: + if n.List == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.ChanType: + switch n.Dir { + case ast.RECV: + children = append(children, tok(n.Begin, len("<-chan"))) + case ast.SEND: + children = append(children, tok(n.Begin, len("chan<-"))) + case ast.RECV | ast.SEND: + children = append(children, tok(n.Begin, len("chan"))) + } + + case *ast.CommClause: + if n.Comm == nil { + children = append(children, + tok(n.Case, len("default"))) + } else { + children = append(children, + tok(n.Case, len("case"))) + } + children = append(children, tok(n.Colon, len(":"))) + + case *ast.Comment: + // nop + + case *ast.CommentGroup: + // nop + + case *ast.CompositeLit: + children = append(children, + tok(n.Lbrace, len("{")), + tok(n.Rbrace, len("{"))) + + case *ast.DeclStmt: + // nop + + case *ast.DeferStmt: + children = append(children, + tok(n.Defer, len("defer"))) + + case 
*ast.Ellipsis: + children = append(children, + tok(n.Ellipsis, len("..."))) + + case *ast.EmptyStmt: + // nop + + case *ast.ExprStmt: + // nop + + case *ast.Field: + // TODO(adonovan): Field.{Doc,Comment,Tag}? + + case *ast.FieldList: + children = append(children, + tok(n.Opening, len("(")), + tok(n.Closing, len(")"))) + + case *ast.File: + // TODO test: Doc + children = append(children, + tok(n.Package, len("package"))) + + case *ast.ForStmt: + children = append(children, + tok(n.For, len("for"))) + + case *ast.FuncDecl: + // TODO(adonovan): FuncDecl.Comment? + + // Uniquely, FuncDecl breaks the invariant that + // preorder traversal yields tokens in lexical order: + // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. + // + // As a workaround, we inline the case for FuncType + // here and order things correctly. + // + children = nil // discard ast.Walk(FuncDecl) info subtrees + children = append(children, tok(n.Type.Func, len("func"))) + if n.Recv != nil { + children = append(children, n.Recv) + } + children = append(children, n.Name) + if n.Type.Params != nil { + children = append(children, n.Type.Params) + } + if n.Type.Results != nil { + children = append(children, n.Type.Results) + } + if n.Body != nil { + children = append(children, n.Body) + } + + case *ast.FuncLit: + // nop + + case *ast.FuncType: + if n.Func != 0 { + children = append(children, + tok(n.Func, len("func"))) + } + + case *ast.GenDecl: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + if n.Lparen != 0 { + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + } + + case *ast.GoStmt: + children = append(children, + tok(n.Go, len("go"))) + + case *ast.Ident: + children = append(children, + tok(n.NamePos, len(n.Name))) + + case *ast.IfStmt: + children = append(children, + tok(n.If, len("if"))) + + case *ast.ImportSpec: + // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
+ + case *ast.IncDecStmt: + children = append(children, + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.IndexExpr: + children = append(children, + tok(n.Lbrack, len("{")), + tok(n.Rbrack, len("}"))) + + case *ast.InterfaceType: + children = append(children, + tok(n.Interface, len("interface"))) + + case *ast.KeyValueExpr: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.LabeledStmt: + children = append(children, + tok(n.Colon, len(":"))) + + case *ast.MapType: + children = append(children, + tok(n.Map, len("map"))) + + case *ast.ParenExpr: + children = append(children, + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.RangeStmt: + children = append(children, + tok(n.For, len("for")), + tok(n.TokPos, len(n.Tok.String()))) + + case *ast.ReturnStmt: + children = append(children, + tok(n.Return, len("return"))) + + case *ast.SelectStmt: + children = append(children, + tok(n.Select, len("select"))) + + case *ast.SelectorExpr: + // nop + + case *ast.SendStmt: + children = append(children, + tok(n.Arrow, len("<-"))) + + case *ast.SliceExpr: + children = append(children, + tok(n.Lbrack, len("[")), + tok(n.Rbrack, len("]"))) + + case *ast.StarExpr: + children = append(children, tok(n.Star, len("*"))) + + case *ast.StructType: + children = append(children, tok(n.Struct, len("struct"))) + + case *ast.SwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.TypeAssertExpr: + children = append(children, + tok(n.Lparen-1, len(".")), + tok(n.Lparen, len("(")), + tok(n.Rparen, len(")"))) + + case *ast.TypeSpec: + // TODO(adonovan): TypeSpec.{Doc,Comment}? + + case *ast.TypeSwitchStmt: + children = append(children, tok(n.Switch, len("switch"))) + + case *ast.UnaryExpr: + children = append(children, tok(n.OpPos, len(n.Op.String()))) + + case *ast.ValueSpec: + // TODO(adonovan): ValueSpec.{Doc,Comment}? + + case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: + // nop + } + + // TODO(adonovan): opt: merge the logic of ast.Inspect() into + // the switch above so we can make interleaved callbacks for + // both Nodes and Tokens in the right order and avoid the need + // to sort. + sort.Sort(byPos(children)) + + return children +} + +type byPos []ast.Node + +func (sl byPos) Len() int { + return len(sl) +} +func (sl byPos) Less(i, j int) bool { + return sl[i].Pos() < sl[j].Pos() +} +func (sl byPos) Swap(i, j int) { + sl[i], sl[j] = sl[j], sl[i] +} + +// NodeDescription returns a description of the concrete type of n suitable +// for a user interface. +// +// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, +// StarExpr) we could be much more specific given the path to the AST +// root. Perhaps we should do that. 
+// +func NodeDescription(n ast.Node) string { + switch n := n.(type) { + case *ast.ArrayType: + return "array type" + case *ast.AssignStmt: + return "assignment" + case *ast.BadDecl: + return "bad declaration" + case *ast.BadExpr: + return "bad expression" + case *ast.BadStmt: + return "bad statement" + case *ast.BasicLit: + return "basic literal" + case *ast.BinaryExpr: + return fmt.Sprintf("binary %s operation", n.Op) + case *ast.BlockStmt: + return "block" + case *ast.BranchStmt: + switch n.Tok { + case token.BREAK: + return "break statement" + case token.CONTINUE: + return "continue statement" + case token.GOTO: + return "goto statement" + case token.FALLTHROUGH: + return "fall-through statement" + } + case *ast.CallExpr: + if len(n.Args) == 1 && !n.Ellipsis.IsValid() { + return "function call (or conversion)" + } + return "function call" + case *ast.CaseClause: + return "case clause" + case *ast.ChanType: + return "channel type" + case *ast.CommClause: + return "communication clause" + case *ast.Comment: + return "comment" + case *ast.CommentGroup: + return "comment group" + case *ast.CompositeLit: + return "composite literal" + case *ast.DeclStmt: + return NodeDescription(n.Decl) + " statement" + case *ast.DeferStmt: + return "defer statement" + case *ast.Ellipsis: + return "ellipsis" + case *ast.EmptyStmt: + return "empty statement" + case *ast.ExprStmt: + return "expression statement" + case *ast.Field: + // Can be any of these: + // struct {x, y int} -- struct field(s) + // struct {T} -- anon struct field + // interface {I} -- interface embedding + // interface {f()} -- interface method + // func (A) func(B) C -- receiver, param(s), result(s) + return "field/method/parameter" + case *ast.FieldList: + return "field/method/parameter list" + case *ast.File: + return "source file" + case *ast.ForStmt: + return "for loop" + case *ast.FuncDecl: + return "function declaration" + case *ast.FuncLit: + return "function literal" + case *ast.FuncType: + return "function type" + case *ast.GenDecl: + switch n.Tok { + case token.IMPORT: + return "import declaration" + case token.CONST: + return "constant declaration" + case token.TYPE: + return "type declaration" + case token.VAR: + return "variable declaration" + } + case *ast.GoStmt: + return "go statement" + case *ast.Ident: + return "identifier" + case *ast.IfStmt: + return "if statement" + case *ast.ImportSpec: + return "import specification" + case *ast.IncDecStmt: + if n.Tok == token.INC { + return "increment statement" + } + return "decrement statement" + case *ast.IndexExpr: + return "index expression" + case *ast.InterfaceType: + return "interface type" + case *ast.KeyValueExpr: + return "key/value association" + case *ast.LabeledStmt: + return "statement label" + case *ast.MapType: + return "map type" + case *ast.Package: + return "package" + case *ast.ParenExpr: + return "parenthesized " + NodeDescription(n.X) + case *ast.RangeStmt: + return "range loop" + case *ast.ReturnStmt: + return "return statement" + case *ast.SelectStmt: + return "select statement" + case *ast.SelectorExpr: + return "selector" + case *ast.SendStmt: + return "channel send" + case *ast.SliceExpr: + return "slice expression" + case *ast.StarExpr: + return "*-operation" // load/store expr or pointer type + case *ast.StructType: + return "struct type" + case *ast.SwitchStmt: + return "switch statement" + case *ast.TypeAssertExpr: + return "type assertion" + case *ast.TypeSpec: + return "type specification" + case *ast.TypeSwitchStmt: + return "type switch" + case 
*ast.UnaryExpr: + return fmt.Sprintf("unary %s operation", n.Op) + case *ast.ValueSpec: + return "value specification" + + } + panic(fmt.Sprintf("unexpected node type: %T", n)) +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go new file mode 100644 index 000000000..107f87c55 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go @@ -0,0 +1,195 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil_test + +// This file defines tests of PathEnclosingInterval. + +// TODO(adonovan): exhaustive tests that run over the whole input +// tree, not just handcrafted examples. + +import ( + "bytes" + "fmt" + "go/ast" + "go/parser" + "go/token" + "strings" + "testing" + + "golang.org/x/tools/go/ast/astutil" +) + +// pathToString returns a string containing the concrete types of the +// nodes in path. +func pathToString(path []ast.Node) string { + var buf bytes.Buffer + fmt.Fprint(&buf, "[") + for i, n := range path { + if i > 0 { + fmt.Fprint(&buf, " ") + } + fmt.Fprint(&buf, strings.TrimPrefix(fmt.Sprintf("%T", n), "*ast.")) + } + fmt.Fprint(&buf, "]") + return buf.String() +} + +// findInterval parses input and returns the [start, end) positions of +// the first occurrence of substr in input. f==nil indicates failure; +// an error has already been reported in that case. +// +func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) { + f, err := parser.ParseFile(fset, "", input, 0) + if err != nil { + t.Errorf("parse error: %s", err) + return + } + + i := strings.Index(input, substr) + if i < 0 { + t.Errorf("%q is not a substring of input", substr) + f = nil + return + } + + filePos := fset.File(f.Package) + return f, filePos.Pos(i), filePos.Pos(i + len(substr)) +} + +// Common input for following tests. +const input = ` +// Hello. +package main +import "fmt" +func f() {} +func main() { + z := (x + y) // add them + f() // NB: ExprStmt and its CallExpr have same Pos/End +} +` + +func TestPathEnclosingInterval_Exact(t *testing.T) { + // For the exact tests, we check that a substring is mapped to + // the canonical string for the node it denotes. 
+ tests := []struct { + substr string // first occurrence of this string indicates interval + node string // complete text of expected containing node + }{ + {"package", + input[11 : len(input)-1]}, + {"\npack", + input[11 : len(input)-1]}, + {"main", + "main"}, + {"import", + "import \"fmt\""}, + {"\"fmt\"", + "\"fmt\""}, + {"\nfunc f() {}\n", + "func f() {}"}, + {"x ", + "x"}, + {" y", + "y"}, + {"z", + "z"}, + {" + ", + "x + y"}, + {" :=", + "z := (x + y)"}, + {"x + y", + "x + y"}, + {"(x + y)", + "(x + y)"}, + {" (x + y) ", + "(x + y)"}, + {" (x + y) // add", + "(x + y)"}, + {"func", + "func f() {}"}, + {"func f() {}", + "func f() {}"}, + {"\nfun", + "func f() {}"}, + {" f", + "f"}, + } + for _, test := range tests { + f, start, end := findInterval(t, new(token.FileSet), input, test.substr) + if f == nil { + continue + } + + path, exact := astutil.PathEnclosingInterval(f, start, end) + if !exact { + t.Errorf("PathEnclosingInterval(%q) not exact", test.substr) + continue + } + + if len(path) == 0 { + if test.node != "" { + t.Errorf("PathEnclosingInterval(%q).path: got [], want %q", + test.substr, test.node) + } + continue + } + + if got := input[path[0].Pos():path[0].End()]; got != test.node { + t.Errorf("PathEnclosingInterval(%q): got %q, want %q (path was %s)", + test.substr, got, test.node, pathToString(path)) + continue + } + } +} + +func TestPathEnclosingInterval_Paths(t *testing.T) { + // For these tests, we check only the path of the enclosing + // node, but not its complete text because it's often quite + // large when !exact. + tests := []struct { + substr string // first occurrence of this string indicates interval + path string // the pathToString(),exact of the expected path + }{ + {"// add", + "[BlockStmt FuncDecl File],false"}, + {"(x + y", + "[ParenExpr AssignStmt BlockStmt FuncDecl File],false"}, + {"x +", + "[BinaryExpr ParenExpr AssignStmt BlockStmt FuncDecl File],false"}, + {"z := (x", + "[AssignStmt BlockStmt FuncDecl File],false"}, + {"func f", + "[FuncDecl File],false"}, + {"func f()", + "[FuncDecl File],false"}, + {" f()", + "[FuncDecl File],false"}, + {"() {}", + "[FuncDecl File],false"}, + {"// Hello", + "[File],false"}, + {" f", + "[Ident FuncDecl File],true"}, + {"func ", + "[FuncDecl File],true"}, + {"mai", + "[Ident File],true"}, + {"f() // NB", + "[CallExpr ExprStmt BlockStmt FuncDecl File],true"}, + } + for _, test := range tests { + f, start, end := findInterval(t, new(token.FileSet), input, test.substr) + if f == nil { + continue + } + + path, exact := astutil.PathEnclosingInterval(f, start, end) + if got := fmt.Sprintf("%s,%v", pathToString(path), exact); got != test.path { + t.Errorf("PathEnclosingInterval(%q): got %q, want %q", + test.substr, got, test.path) + continue + } + } +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go new file mode 100644 index 000000000..83f196cd5 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports.go @@ -0,0 +1,470 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package astutil contains common utilities for working with the Go AST. +package astutil // import "golang.org/x/tools/go/ast/astutil" + +import ( + "fmt" + "go/ast" + "go/token" + "strconv" + "strings" +) + +// AddImport adds the import path to the file f, if absent. 
+func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) { + return AddNamedImport(fset, f, "", ipath) +} + +// AddNamedImport adds the import path to the file f, if absent. +// If name is not empty, it is used to rename the import. +// +// For example, calling +// AddNamedImport(fset, f, "pathpkg", "path") +// adds +// import pathpkg "path" +func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) { + if imports(f, ipath) { + return false + } + + newImport := &ast.ImportSpec{ + Path: &ast.BasicLit{ + Kind: token.STRING, + Value: strconv.Quote(ipath), + }, + } + if name != "" { + newImport.Name = &ast.Ident{Name: name} + } + + // Find an import decl to add to. + // The goal is to find an existing import + // whose import path has the longest shared + // prefix with ipath. + var ( + bestMatch = -1 // length of longest shared prefix + lastImport = -1 // index in f.Decls of the file's final import decl + impDecl *ast.GenDecl // import decl containing the best match + impIndex = -1 // spec index in impDecl containing the best match + + isThirdPartyPath = isThirdParty(ipath) + ) + for i, decl := range f.Decls { + gen, ok := decl.(*ast.GenDecl) + if ok && gen.Tok == token.IMPORT { + lastImport = i + // Do not add to import "C", to avoid disrupting the + // association with its doc comment, breaking cgo. + if declImports(gen, "C") { + continue + } + + // Match an empty import decl if that's all that is available. + if len(gen.Specs) == 0 && bestMatch == -1 { + impDecl = gen + } + + // Compute longest shared prefix with imports in this group and find best + // matched import spec. + // 1. Always prefer import spec with longest shared prefix. + // 2. While match length is 0, + // - for stdlib package: prefer first import spec. + // - for third party package: prefer first third party import spec. + // We cannot use last import spec as best match for third party package + // because grouped imports are usually placed last by goimports -local + // flag. + // See issue #19190. + seenAnyThirdParty := false + for j, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + p := importPath(impspec) + n := matchLen(p, ipath) + if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { + bestMatch = n + impDecl = gen + impIndex = j + } + seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) + } + } + } + + // If no import decl found, add one after the last import. + if impDecl == nil { + impDecl = &ast.GenDecl{ + Tok: token.IMPORT, + } + if lastImport >= 0 { + impDecl.TokPos = f.Decls[lastImport].End() + } else { + // There are no existing imports. + // Our new import goes after the package declaration and after + // the comment, if any, that starts on the same line as the + // package declaration. + impDecl.TokPos = f.Package + + file := fset.File(f.Package) + pkgLine := file.Line(f.Package) + for _, c := range f.Comments { + if file.Line(c.Pos()) > pkgLine { + break + } + impDecl.TokPos = c.End() + } + } + f.Decls = append(f.Decls, nil) + copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) + f.Decls[lastImport+1] = impDecl + } + + // Insert new import at insertAt. 
+ insertAt := 0 + if impIndex >= 0 { + // insert after the found import + insertAt = impIndex + 1 + } + impDecl.Specs = append(impDecl.Specs, nil) + copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) + impDecl.Specs[insertAt] = newImport + pos := impDecl.Pos() + if insertAt > 0 { + // If there is a comment after an existing import, preserve the comment + // position by adding the new import after the comment. + if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { + pos = spec.Comment.End() + } else { + // Assign same position as the previous import, + // so that the sorter sees it as being in the same block. + pos = impDecl.Specs[insertAt-1].Pos() + } + } + if newImport.Name != nil { + newImport.Name.NamePos = pos + } + newImport.Path.ValuePos = pos + newImport.EndPos = pos + + // Clean up parens. impDecl contains at least one spec. + if len(impDecl.Specs) == 1 { + // Remove unneeded parens. + impDecl.Lparen = token.NoPos + } else if !impDecl.Lparen.IsValid() { + // impDecl needs parens added. + impDecl.Lparen = impDecl.Specs[0].Pos() + } + + f.Imports = append(f.Imports, newImport) + + if len(f.Decls) <= 1 { + return true + } + + // Merge all the import declarations into the first one. + var first *ast.GenDecl + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { + continue + } + if first == nil { + first = gen + continue // Don't touch the first one. + } + // We now know there is more than one package in this import + // declaration. Ensure that it ends up parenthesized. + first.Lparen = first.Pos() + // Move the imports of the other import declaration to the first one. + for _, spec := range gen.Specs { + spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() + first.Specs = append(first.Specs, spec) + } + f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) + i-- + } + + return true +} + +func isThirdParty(importPath string) bool { + // Third party package import path usually contains "." (".com", ".org", ...) + // This logic is taken from golang.org/x/tools/imports package. + return strings.Contains(importPath, ".") +} + +// DeleteImport deletes the import path from the file f, if present. +func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { + return DeleteNamedImport(fset, f, "", path) +} + +// DeleteNamedImport deletes the import with the given name and path from the file f, if present. +func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { + var delspecs []*ast.ImportSpec + var delcomments []*ast.CommentGroup + + // Find the import nodes that import path, if any. + for i := 0; i < len(f.Decls); i++ { + decl := f.Decls[i] + gen, ok := decl.(*ast.GenDecl) + if !ok || gen.Tok != token.IMPORT { + continue + } + for j := 0; j < len(gen.Specs); j++ { + spec := gen.Specs[j] + impspec := spec.(*ast.ImportSpec) + if impspec.Name == nil && name != "" { + continue + } + if impspec.Name != nil && impspec.Name.Name != name { + continue + } + if importPath(impspec) != path { + continue + } + + // We found an import spec that imports path. + // Delete it. + delspecs = append(delspecs, impspec) + deleted = true + copy(gen.Specs[j:], gen.Specs[j+1:]) + gen.Specs = gen.Specs[:len(gen.Specs)-1] + + // If this was the last import spec in this decl, + // delete the decl, too. 
+ if len(gen.Specs) == 0 { + copy(f.Decls[i:], f.Decls[i+1:]) + f.Decls = f.Decls[:len(f.Decls)-1] + i-- + break + } else if len(gen.Specs) == 1 { + if impspec.Doc != nil { + delcomments = append(delcomments, impspec.Doc) + } + if impspec.Comment != nil { + delcomments = append(delcomments, impspec.Comment) + } + for _, cg := range f.Comments { + // Found comment on the same line as the import spec. + if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { + delcomments = append(delcomments, cg) + break + } + } + + spec := gen.Specs[0].(*ast.ImportSpec) + + // Move the documentation right after the import decl. + if spec.Doc != nil { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + } + for _, cg := range f.Comments { + if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { + for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { + fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) + } + break + } + } + } + if j > 0 { + lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) + lastLine := fset.Position(lastImpspec.Path.ValuePos).Line + line := fset.Position(impspec.Path.ValuePos).Line + + // We deleted an entry but now there may be + // a blank line-sized hole where the import was. + if line-lastLine > 1 { + // There was a blank line immediately preceding the deleted import, + // so there's no need to close the hole. + // Do nothing. + } else if line != fset.File(gen.Rparen).LineCount() { + // There was no blank line. Close the hole. + fset.File(gen.Rparen).MergeLine(line) + } + } + j-- + } + } + + // Delete imports from f.Imports. + for i := 0; i < len(f.Imports); i++ { + imp := f.Imports[i] + for j, del := range delspecs { + if imp == del { + copy(f.Imports[i:], f.Imports[i+1:]) + f.Imports = f.Imports[:len(f.Imports)-1] + copy(delspecs[j:], delspecs[j+1:]) + delspecs = delspecs[:len(delspecs)-1] + i-- + break + } + } + } + + // Delete comments from f.Comments. + for i := 0; i < len(f.Comments); i++ { + cg := f.Comments[i] + for j, del := range delcomments { + if cg == del { + copy(f.Comments[i:], f.Comments[i+1:]) + f.Comments = f.Comments[:len(f.Comments)-1] + copy(delcomments[j:], delcomments[j+1:]) + delcomments = delcomments[:len(delcomments)-1] + i-- + break + } + } + } + + if len(delspecs) > 0 { + panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) + } + + return +} + +// RewriteImport rewrites any import of path oldPath to path newPath. +func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { + for _, imp := range f.Imports { + if importPath(imp) == oldPath { + rewrote = true + // record old End, because the default is to compute + // it using the length of imp.Path.Value. + imp.EndPos = imp.End() + imp.Path.Value = strconv.Quote(newPath) + } + } + return +} + +// UsesImport reports whether a given import is used. +func UsesImport(f *ast.File, path string) (used bool) { + spec := importSpec(f, path) + if spec == nil { + return + } + + name := spec.Name.String() + switch name { + case "": + // If the package name is not explicitly specified, + // make an educated guess. This is not guaranteed to be correct. + lastSlash := strings.LastIndex(path, "/") + if lastSlash == -1 { + name = path + } else { + name = path[lastSlash+1:] + } + case "_", ".": + // Not sure if this import is used - err on the side of caution. 
+ return true + } + + ast.Walk(visitFn(func(n ast.Node) { + sel, ok := n.(*ast.SelectorExpr) + if ok && isTopName(sel.X, name) { + used = true + } + }), f) + + return +} + +type visitFn func(node ast.Node) + +func (fn visitFn) Visit(node ast.Node) ast.Visitor { + fn(node) + return fn +} + +// imports returns true if f imports path. +func imports(f *ast.File, path string) bool { + return importSpec(f, path) != nil +} + +// importSpec returns the import spec if f imports path, +// or nil otherwise. +func importSpec(f *ast.File, path string) *ast.ImportSpec { + for _, s := range f.Imports { + if importPath(s) == path { + return s + } + } + return nil +} + +// importPath returns the unquoted import path of s, +// or "" if the path is not properly quoted. +func importPath(s *ast.ImportSpec) string { + t, err := strconv.Unquote(s.Path.Value) + if err == nil { + return t + } + return "" +} + +// declImports reports whether gen contains an import of path. +func declImports(gen *ast.GenDecl, path string) bool { + if gen.Tok != token.IMPORT { + return false + } + for _, spec := range gen.Specs { + impspec := spec.(*ast.ImportSpec) + if importPath(impspec) == path { + return true + } + } + return false +} + +// matchLen returns the length of the longest path segment prefix shared by x and y. +func matchLen(x, y string) int { + n := 0 + for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { + if x[i] == '/' { + n++ + } + } + return n +} + +// isTopName returns true if n is a top-level unresolved identifier with the given name. +func isTopName(n ast.Expr, name string) bool { + id, ok := n.(*ast.Ident) + return ok && id.Name == name && id.Obj == nil +} + +// Imports returns the file imports grouped by paragraph. +func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { + var groups [][]*ast.ImportSpec + + for _, decl := range f.Decls { + genDecl, ok := decl.(*ast.GenDecl) + if !ok || genDecl.Tok != token.IMPORT { + break + } + + group := []*ast.ImportSpec{} + + var lastLine int + for _, spec := range genDecl.Specs { + importSpec := spec.(*ast.ImportSpec) + pos := importSpec.Path.ValuePos + line := fset.Position(pos).Line + if lastLine > 0 && pos > 0 && line-lastLine > 1 { + groups = append(groups, group) + group = []*ast.ImportSpec{} + } + group = append(group, importSpec) + lastLine = line + } + groups = append(groups, group) + } + + return groups +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go b/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go new file mode 100644 index 000000000..8bc348087 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go @@ -0,0 +1,1818 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package astutil + +import ( + "bytes" + "go/ast" + "go/format" + "go/parser" + "go/token" + "reflect" + "strconv" + "testing" +) + +var fset = token.NewFileSet() + +func parse(t *testing.T, name, in string) *ast.File { + file, err := parser.ParseFile(fset, name, in, parser.ParseComments) + if err != nil { + t.Fatalf("%s parse: %v", name, err) + } + return file +} + +func print(t *testing.T, name string, f *ast.File) string { + var buf bytes.Buffer + if err := format.Node(&buf, fset, f); err != nil { + t.Fatalf("%s gofmt: %v", name, err) + } + return string(buf.Bytes()) +} + +type test struct { + name string + renamedPkg string + pkg string + in string + out string + broken bool // known broken +} + +var addTests = []test{ + { + name: "leave os alone", + pkg: "os", + in: `package main + +import ( + "os" +) +`, + out: `package main + +import ( + "os" +) +`, + }, + { + name: "import.1", + pkg: "os", + in: `package main +`, + out: `package main + +import "os" +`, + }, + { + name: "import.2", + pkg: "os", + in: `package main + +// Comment +import "C" +`, + out: `package main + +// Comment +import "C" +import "os" +`, + }, + { + name: "import.3", + pkg: "os", + in: `package main + +// Comment +import "C" + +import ( + "io" + "utf8" +) +`, + out: `package main + +// Comment +import "C" + +import ( + "io" + "os" + "utf8" +) +`, + }, + { + name: "import.17", + pkg: "x/y/z", + in: `package main + +// Comment +import "C" + +import ( + "a" + "b" + + "x/w" + + "d/f" +) +`, + out: `package main + +// Comment +import "C" + +import ( + "a" + "b" + + "x/w" + "x/y/z" + + "d/f" +) +`, + }, + { + name: "issue #19190", + pkg: "x.org/y/z", + in: `package main + +// Comment +import "C" + +import ( + "bytes" + "os" + + "d.com/f" +) +`, + out: `package main + +// Comment +import "C" + +import ( + "bytes" + "os" + + "d.com/f" + "x.org/y/z" +) +`, + }, + { + name: "issue #19190 with existing grouped import packages", + pkg: "x.org/y/z", + in: `package main + +// Comment +import "C" + +import ( + "bytes" + "os" + + "c.com/f" + "d.com/f" + + "y.com/a" + "y.com/b" + "y.com/c" +) +`, + out: `package main + +// Comment +import "C" + +import ( + "bytes" + "os" + + "c.com/f" + "d.com/f" + "x.org/y/z" + + "y.com/a" + "y.com/b" + "y.com/c" +) +`, + }, + { + name: "issue #19190 - match score is still respected", + pkg: "y.org/c", + in: `package main + +import ( + "x.org/a" + + "y.org/b" +) +`, + out: `package main + +import ( + "x.org/a" + + "y.org/b" + "y.org/c" +) +`, + }, + { + name: "import into singular group", + pkg: "bytes", + in: `package main + +import "os" + +`, + out: `package main + +import ( + "bytes" + "os" +) +`, + }, + { + name: "import into singular group with comment", + pkg: "bytes", + in: `package main + +import /* why */ /* comment here? */ "os" + +`, + out: `package main + +import /* why */ /* comment here? */ ( + "bytes" + "os" +) +`, + }, + { + name: "import into group with leading comment", + pkg: "strings", + in: `package main + +import ( + // comment before bytes + "bytes" + "os" +) + +`, + out: `package main + +import ( + // comment before bytes + "bytes" + "os" + "strings" +) +`, + }, + { + name: "", + renamedPkg: "fmtpkg", + pkg: "fmt", + in: `package main + +import "os" + +`, + out: `package main + +import ( + fmtpkg "fmt" + "os" +) +`, + }, + { + name: "struct comment", + pkg: "time", + in: `package main + +// This is a comment before a struct. +type T struct { + t time.Time +} +`, + out: `package main + +import "time" + +// This is a comment before a struct. 
+type T struct { + t time.Time +} +`, + }, + { + name: "issue 8729 import C", + pkg: "time", + in: `package main + +import "C" + +// comment +type T time.Time +`, + out: `package main + +import "C" +import "time" + +// comment +type T time.Time +`, + }, + { + name: "issue 8729 empty import", + pkg: "time", + in: `package main + +import () + +// comment +type T time.Time +`, + out: `package main + +import "time" + +// comment +type T time.Time +`, + }, + { + name: "issue 8729 comment on package line", + pkg: "time", + in: `package main // comment + +type T time.Time +`, + out: `package main // comment +import "time" + +type T time.Time +`, + }, + { + name: "issue 8729 comment after package", + pkg: "time", + in: `package main +// comment + +type T time.Time +`, + out: `package main + +import "time" + +// comment + +type T time.Time +`, + }, + { + name: "issue 8729 comment before and on package line", + pkg: "time", + in: `// comment before +package main // comment on + +type T time.Time +`, + out: `// comment before +package main // comment on +import "time" + +type T time.Time +`, + }, + + // Issue 9961: Match prefixes using path segments rather than bytes + { + name: "issue 9961", + pkg: "regexp", + in: `package main + +import ( + "flag" + "testing" + + "rsc.io/p" +) +`, + out: `package main + +import ( + "flag" + "regexp" + "testing" + + "rsc.io/p" +) +`, + }, + // Issue 10337: Preserve comment position + { + name: "issue 10337", + pkg: "fmt", + in: `package main + +import ( + "bytes" // a + "log" // c +) +`, + out: `package main + +import ( + "bytes" // a + "fmt" + "log" // c +) +`, + }, + { + name: "issue 10337 new import at the start", + pkg: "bytes", + in: `package main + +import ( + "fmt" // b + "log" // c +) +`, + out: `package main + +import ( + "bytes" + "fmt" // b + "log" // c +) +`, + }, + { + name: "issue 10337 new import at the end", + pkg: "log", + in: `package main + +import ( + "bytes" // a + "fmt" // b +) +`, + out: `package main + +import ( + "bytes" // a + "fmt" // b + "log" +) +`, + }, + // Issue 14075: Merge import declarations + { + name: "issue 14075", + pkg: "bufio", + in: `package main + +import "bytes" +import "fmt" +`, + out: `package main + +import ( + "bufio" + "bytes" + "fmt" +) +`, + }, + { + name: "issue 14075 update position", + pkg: "bufio", + in: `package main + +import "bytes" +import ( + "fmt" +) +`, + out: `package main + +import ( + "bufio" + "bytes" + "fmt" +) +`, + }, + { + name: `issue 14075 ignore import "C"`, + pkg: "bufio", + in: `package main + +// Comment +import "C" + +import "bytes" +import "fmt" +`, + out: `package main + +// Comment +import "C" + +import ( + "bufio" + "bytes" + "fmt" +) +`, + }, + { + name: `issue 14075 ignore adjacent import "C"`, + pkg: "bufio", + in: `package main + +// Comment +import "C" +import "fmt" +`, + out: `package main + +// Comment +import "C" +import ( + "bufio" + "fmt" +) +`, + }, + { + name: `issue 14075 ignore adjacent import "C" (without factored import)`, + pkg: "bufio", + in: `package main + +// Comment +import "C" +import "fmt" +`, + out: `package main + +// Comment +import "C" +import ( + "bufio" + "fmt" +) +`, + }, + { + name: `issue 14075 ignore single import "C"`, + pkg: "bufio", + in: `package main + +// Comment +import "C" +`, + out: `package main + +// Comment +import "C" +import "bufio" +`, + }, + { + name: `issue 17212 several single-import lines with shared prefix ending in a slash`, + pkg: "net/http", + in: `package main + +import "bufio" +import "net/url" +`, + out: `package main + +import 
( + "bufio" + "net/http" + "net/url" +) +`, + }, + { + name: `issue 17212 block imports lines with shared prefix ending in a slash`, + pkg: "net/http", + in: `package main + +import ( + "bufio" + "net/url" +) +`, + out: `package main + +import ( + "bufio" + "net/http" + "net/url" +) +`, + }, + { + name: `issue 17213 many single-import lines`, + pkg: "fmt", + in: `package main + +import "bufio" +import "bytes" +import "errors" +`, + out: `package main + +import ( + "bufio" + "bytes" + "errors" + "fmt" +) +`, + }, +} + +func TestAddImport(t *testing.T) { + for _, test := range addTests { + file := parse(t, test.name, test.in) + var before bytes.Buffer + ast.Fprint(&before, fset, file, nil) + AddNamedImport(fset, file, test.renamedPkg, test.pkg) + if got := print(t, test.name, file); got != test.out { + if test.broken { + t.Logf("%s is known broken:\ngot: %s\nwant: %s", test.name, got, test.out) + } else { + t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out) + } + var after bytes.Buffer + ast.Fprint(&after, fset, file, nil) + + t.Logf("AST before:\n%s\nAST after:\n%s\n", before.String(), after.String()) + } + } +} + +func TestDoubleAddImport(t *testing.T) { + file := parse(t, "doubleimport", "package main\n") + AddImport(fset, file, "os") + AddImport(fset, file, "bytes") + want := `package main + +import ( + "bytes" + "os" +) +` + if got := print(t, "doubleimport", file); got != want { + t.Errorf("got: %s\nwant: %s", got, want) + } +} + +func TestDoubleAddNamedImport(t *testing.T) { + file := parse(t, "doublenamedimport", "package main\n") + AddNamedImport(fset, file, "o", "os") + AddNamedImport(fset, file, "i", "io") + want := `package main + +import ( + i "io" + o "os" +) +` + if got := print(t, "doublenamedimport", file); got != want { + t.Errorf("got: %s\nwant: %s", got, want) + } +} + +// Part of issue 8729. +func TestDoubleAddImportWithDeclComment(t *testing.T) { + file := parse(t, "doubleimport", `package main + +import ( +) + +// comment +type I int +`) + // The AddImport order here matters. 
+ AddImport(fset, file, "golang.org/x/tools/go/ast/astutil") + AddImport(fset, file, "os") + want := `package main + +import ( + "golang.org/x/tools/go/ast/astutil" + "os" +) + +// comment +type I int +` + if got := print(t, "doubleimport_with_decl_comment", file); got != want { + t.Errorf("got: %s\nwant: %s", got, want) + } +} + +var deleteTests = []test{ + { + name: "import.4", + pkg: "os", + in: `package main + +import ( + "os" +) +`, + out: `package main +`, + }, + { + name: "import.5", + pkg: "os", + in: `package main + +// Comment +import "C" +import "os" +`, + out: `package main + +// Comment +import "C" +`, + }, + { + name: "import.6", + pkg: "os", + in: `package main + +// Comment +import "C" + +import ( + "io" + "os" + "utf8" +) +`, + out: `package main + +// Comment +import "C" + +import ( + "io" + "utf8" +) +`, + }, + { + name: "import.7", + pkg: "io", + in: `package main + +import ( + "io" // a + "os" // b + "utf8" // c +) +`, + out: `package main + +import ( + // a + "os" // b + "utf8" // c +) +`, + }, + { + name: "import.8", + pkg: "os", + in: `package main + +import ( + "io" // a + "os" // b + "utf8" // c +) +`, + out: `package main + +import ( + "io" // a + // b + "utf8" // c +) +`, + }, + { + name: "import.9", + pkg: "utf8", + in: `package main + +import ( + "io" // a + "os" // b + "utf8" // c +) +`, + out: `package main + +import ( + "io" // a + "os" // b + // c +) +`, + }, + { + name: "import.10", + pkg: "io", + in: `package main + +import ( + "io" + "os" + "utf8" +) +`, + out: `package main + +import ( + "os" + "utf8" +) +`, + }, + { + name: "import.11", + pkg: "os", + in: `package main + +import ( + "io" + "os" + "utf8" +) +`, + out: `package main + +import ( + "io" + "utf8" +) +`, + }, + { + name: "import.12", + pkg: "utf8", + in: `package main + +import ( + "io" + "os" + "utf8" +) +`, + out: `package main + +import ( + "io" + "os" +) +`, + }, + { + name: "handle.raw.quote.imports", + pkg: "os", + in: "package main\n\nimport `os`", + out: `package main +`, + }, + { + name: "import.13", + pkg: "io", + in: `package main + +import ( + "fmt" + + "io" + "os" + "utf8" + + "go/format" +) +`, + out: `package main + +import ( + "fmt" + + "os" + "utf8" + + "go/format" +) +`, + }, + { + name: "import.14", + pkg: "io", + in: `package main + +import ( + "fmt" // a + + "io" // b + "os" // c + "utf8" // d + + "go/format" // e +) +`, + out: `package main + +import ( + "fmt" // a + + // b + "os" // c + "utf8" // d + + "go/format" // e +) +`, + }, + { + name: "import.15", + pkg: "double", + in: `package main + +import ( + "double" + "double" +) +`, + out: `package main +`, + }, + { + name: "import.16", + pkg: "bubble", + in: `package main + +import ( + "toil" + "bubble" + "bubble" + "trouble" +) +`, + out: `package main + +import ( + "toil" + "trouble" +) +`, + }, + { + name: "import.17", + pkg: "quad", + in: `package main + +import ( + "quad" + "quad" +) + +import ( + "quad" + "quad" +) +`, + out: `package main +`, + }, + { + name: "import.18", + renamedPkg: "x", + pkg: "fmt", + in: `package main + +import ( + "fmt" + x "fmt" +) +`, + out: `package main + +import ( + "fmt" +) +`, + }, + { + name: "import.18", + renamedPkg: "x", + pkg: "fmt", + in: `package main + +import x "fmt" +import y "fmt" +`, + out: `package main + +import y "fmt" +`, + }, + // Issue #15432, #18051 + { + name: "import.19", + pkg: "fmt", + in: `package main + +import ( + "fmt" + + // Some comment. + "io" +)`, + out: `package main + +import ( + // Some comment. 
+ "io" +) +`, + }, + { + name: "import.20", + pkg: "fmt", + in: `package main + +import ( + "fmt" + + // Some + // comment. + "io" +)`, + out: `package main + +import ( + // Some + // comment. + "io" +) +`, + }, + { + name: "import.21", + pkg: "fmt", + in: `package main + +import ( + "fmt" + + /* + Some + comment. + */ + "io" +)`, + out: `package main + +import ( + /* + Some + comment. + */ + "io" +) +`, + }, + { + name: "import.22", + pkg: "fmt", + in: `package main + +import ( + /* Some */ + // comment. + "io" + "fmt" +)`, + out: `package main + +import ( + /* Some */ + // comment. + "io" +) +`, + }, + { + name: "import.23", + pkg: "fmt", + in: `package main + +import ( + // comment 1 + "fmt" + // comment 2 + "io" +)`, + out: `package main + +import ( + // comment 2 + "io" +) +`, + }, + { + name: "import.24", + pkg: "fmt", + in: `package main + +import ( + "fmt" // comment 1 + "io" // comment 2 +)`, + out: `package main + +import ( + "io" // comment 2 +) +`, + }, + { + name: "import.25", + pkg: "fmt", + in: `package main + +import ( + "fmt" + /* comment */ "io" +)`, + out: `package main + +import ( + /* comment */ "io" +) +`, + }, + { + name: "import.26", + pkg: "fmt", + in: `package main + +import ( + "fmt" + "io" /* comment */ +)`, + out: `package main + +import ( + "io" /* comment */ +) +`, + }, + { + name: "import.27", + pkg: "fmt", + in: `package main + +import ( + "fmt" /* comment */ + "io" +)`, + out: `package main + +import ( + "io" +) +`, + }, + { + name: "import.28", + pkg: "fmt", + in: `package main + +import ( + /* comment */ "fmt" + "io" +)`, + out: `package main + +import ( + "io" +) +`, + }, + { + name: "import.29", + pkg: "fmt", + in: `package main + +// comment 1 +import ( + "fmt" + "io" // comment 2 +)`, + out: `package main + +// comment 1 +import ( + "io" // comment 2 +) +`, + }, + { + name: "import.30", + pkg: "fmt", + in: `package main + +// comment 1 +import ( + "fmt" // comment 2 + "io" +)`, + out: `package main + +// comment 1 +import ( + "io" +) +`, + }, + { + name: "import.31", + pkg: "fmt", + in: `package main + +// comment 1 +import ( + "fmt" + /* comment 2 */ "io" +)`, + out: `package main + +// comment 1 +import ( + /* comment 2 */ "io" +) +`, + }, + { + name: "import.32", + pkg: "fmt", + renamedPkg: "f", + in: `package main + +// comment 1 +import ( + f "fmt" + /* comment 2 */ i "io" +)`, + out: `package main + +// comment 1 +import ( + /* comment 2 */ i "io" +) +`, + }, + { + name: "import.33", + pkg: "fmt", + renamedPkg: "f", + in: `package main + +// comment 1 +import ( + /* comment 2 */ f "fmt" + i "io" +)`, + out: `package main + +// comment 1 +import ( + i "io" +) +`, + }, + { + name: "import.34", + pkg: "fmt", + renamedPkg: "f", + in: `package main + +// comment 1 +import ( + f "fmt" /* comment 2 */ + i "io" +)`, + out: `package main + +// comment 1 +import ( + i "io" +) +`, + }, + { + name: "import.35", + pkg: "fmt", + in: `package main + +// comment 1 +import ( + "fmt" + // comment 2 + "io" +)`, + out: `package main + +// comment 1 +import ( + // comment 2 + "io" +) +`, + }, + { + name: "import.36", + pkg: "fmt", + in: `package main + +/* comment 1 */ +import ( + "fmt" + /* comment 2 */ + "io" +)`, + out: `package main + +/* comment 1 */ +import ( + /* comment 2 */ + "io" +) +`, + }, + + // Issue 20229: MergeLine panic on weird input + { + name: "import.37", + pkg: "io", + in: `package main +import("_" +"io")`, + out: `package main + +import ( + "_" +) +`, + }, +} + +func TestDeleteImport(t *testing.T) { + for _, test := range deleteTests { + file 
:= parse(t, test.name, test.in) + DeleteNamedImport(fset, file, test.renamedPkg, test.pkg) + if got := print(t, test.name, file); got != test.out { + t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out) + } + } +} + +type rewriteTest struct { + name string + srcPkg string + dstPkg string + in string + out string +} + +var rewriteTests = []rewriteTest{ + { + name: "import.13", + srcPkg: "utf8", + dstPkg: "encoding/utf8", + in: `package main + +import ( + "io" + "os" + "utf8" // thanks ken +) +`, + out: `package main + +import ( + "encoding/utf8" // thanks ken + "io" + "os" +) +`, + }, + { + name: "import.14", + srcPkg: "asn1", + dstPkg: "encoding/asn1", + in: `package main + +import ( + "asn1" + "crypto" + "crypto/rsa" + _ "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "time" +) + +var x = 1 +`, + out: `package main + +import ( + "crypto" + "crypto/rsa" + _ "crypto/sha1" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "time" +) + +var x = 1 +`, + }, + { + name: "import.15", + srcPkg: "url", + dstPkg: "net/url", + in: `package main + +import ( + "bufio" + "net" + "path" + "url" +) + +var x = 1 // comment on x, not on url +`, + out: `package main + +import ( + "bufio" + "net" + "net/url" + "path" +) + +var x = 1 // comment on x, not on url +`, + }, + { + name: "import.16", + srcPkg: "http", + dstPkg: "net/http", + in: `package main + +import ( + "flag" + "http" + "log" + "text/template" +) + +var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 +`, + out: `package main + +import ( + "flag" + "log" + "net/http" + "text/template" +) + +var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 +`, + }, +} + +func TestRewriteImport(t *testing.T) { + for _, test := range rewriteTests { + file := parse(t, test.name, test.in) + RewriteImport(fset, file, test.srcPkg, test.dstPkg) + if got := print(t, test.name, file); got != test.out { + t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out) + } + } +} + +var importsTests = []struct { + name string + in string + want [][]string +}{ + { + name: "no packages", + in: `package foo +`, + want: nil, + }, + { + name: "one group", + in: `package foo + +import ( + "fmt" + "testing" +) +`, + want: [][]string{{"fmt", "testing"}}, + }, + { + name: "four groups", + in: `package foo + +import "C" +import ( + "fmt" + "testing" + + "appengine" + + "myproject/mylib1" + "myproject/mylib2" +) +`, + want: [][]string{ + {"C"}, + {"fmt", "testing"}, + {"appengine"}, + {"myproject/mylib1", "myproject/mylib2"}, + }, + }, + { + name: "multiple factored groups", + in: `package foo + +import ( + "fmt" + "testing" + + "appengine" +) +import ( + "reflect" + + "bytes" +) +`, + want: [][]string{ + {"fmt", "testing"}, + {"appengine"}, + {"reflect"}, + {"bytes"}, + }, + }, +} + +func unquote(s string) string { + res, err := strconv.Unquote(s) + if err != nil { + return "could_not_unquote" + } + return res +} + +func TestImports(t *testing.T) { + fset := token.NewFileSet() + for _, test := range importsTests { + f, err := parser.ParseFile(fset, "test.go", test.in, 0) + if err != nil { + t.Errorf("%s: %v", test.name, err) + continue + } + var got [][]string + for _, group := range Imports(fset, f) { + var b []string + for _, spec := range group { + b = append(b, unquote(spec.Path.Value)) + } + got = append(got, b) + } + if !reflect.DeepEqual(got, test.want) { + t.Errorf("Imports(%s)=%v, want %v", test.name, got, test.want) + } + } +} + +var usesImportTests = []struct { + name string + path string + in string + 
want bool +}{ + { + name: "no packages", + path: "io", + in: `package foo +`, + want: false, + }, + { + name: "import.1", + path: "io", + in: `package foo + +import "io" + +var _ io.Writer +`, + want: true, + }, + { + name: "import.2", + path: "io", + in: `package foo + +import "io" +`, + want: false, + }, + { + name: "import.3", + path: "io", + in: `package foo + +import "io" + +var io = 42 +`, + want: false, + }, + { + name: "import.4", + path: "io", + in: `package foo + +import i "io" + +var _ i.Writer +`, + want: true, + }, + { + name: "import.5", + path: "io", + in: `package foo + +import i "io" +`, + want: false, + }, + { + name: "import.6", + path: "io", + in: `package foo + +import i "io" + +var i = 42 +var io = 42 +`, + want: false, + }, + { + name: "import.7", + path: "encoding/json", + in: `package foo + +import "encoding/json" + +var _ json.Encoder +`, + want: true, + }, + { + name: "import.8", + path: "encoding/json", + in: `package foo + +import "encoding/json" +`, + want: false, + }, + { + name: "import.9", + path: "encoding/json", + in: `package foo + +import "encoding/json" + +var json = 42 +`, + want: false, + }, + { + name: "import.10", + path: "encoding/json", + in: `package foo + +import j "encoding/json" + +var _ j.Encoder +`, + want: true, + }, + { + name: "import.11", + path: "encoding/json", + in: `package foo + +import j "encoding/json" +`, + want: false, + }, + { + name: "import.12", + path: "encoding/json", + in: `package foo + +import j "encoding/json" + +var j = 42 +var json = 42 +`, + want: false, + }, + { + name: "import.13", + path: "io", + in: `package foo + +import _ "io" +`, + want: true, + }, + { + name: "import.14", + path: "io", + in: `package foo + +import . "io" +`, + want: true, + }, +} + +func TestUsesImport(t *testing.T) { + fset := token.NewFileSet() + for _, test := range usesImportTests { + f, err := parser.ParseFile(fset, "test.go", test.in, 0) + if err != nil { + t.Errorf("%s: %v", test.name, err) + continue + } + got := UsesImport(f, test.path) + if got != test.want { + t.Errorf("UsesImport(%s)=%v, want %v", test.name, got, test.want) + } + } +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go new file mode 100644 index 000000000..cf72ea990 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go @@ -0,0 +1,477 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil + +import ( + "fmt" + "go/ast" + "reflect" + "sort" +) + +// An ApplyFunc is invoked by Apply for each node n, even if n is nil, +// before and/or after the node's children, using a Cursor describing +// the current node and providing operations on it. +// +// The return value of ApplyFunc controls the syntax tree traversal. +// See Apply for details. +type ApplyFunc func(*Cursor) bool + +// Apply traverses a syntax tree recursively, starting with root, +// and calling pre and post for each node as described below. +// Apply returns the syntax tree, possibly modified. +// +// If pre is not nil, it is called for each node before the node's +// children are traversed (pre-order). If pre returns false, no +// children are traversed, and post is not called for that node. +// +// If post is not nil, and a prior call of pre didn't return false, +// post is called for each node after its children are traversed +// (post-order). 
If post returns false, traversal is terminated and +// Apply returns immediately. +// +// Only fields that refer to AST nodes are considered children; +// i.e., token.Pos, Scopes, Objects, and fields of basic types +// (strings, etc.) are ignored. +// +// Children are traversed in the order in which they appear in the +// respective node's struct definition. A package's files are +// traversed in the filenames' alphabetical order. +// +func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { + parent := &struct{ ast.Node }{root} + defer func() { + if r := recover(); r != nil && r != abort { + panic(r) + } + result = parent.Node + }() + a := &application{pre: pre, post: post} + a.apply(parent, "Node", nil, root) + return +} + +var abort = new(int) // singleton, to signal termination of Apply + +// A Cursor describes a node encountered during Apply. +// Information about the node and its parent is available +// from the Node, Parent, Name, and Index methods. +// +// If p is a variable of type and value of the current parent node +// c.Parent(), and f is the field identifier with name c.Name(), +// the following invariants hold: +// +// p.f == c.Node() if c.Index() < 0 +// p.f[c.Index()] == c.Node() if c.Index() >= 0 +// +// The methods Replace, Delete, InsertBefore, and InsertAfter +// can be used to change the AST without disrupting Apply. +type Cursor struct { + parent ast.Node + name string + iter *iterator // valid if non-nil + node ast.Node +} + +// Node returns the current Node. +func (c *Cursor) Node() ast.Node { return c.node } + +// Parent returns the parent of the current Node. +func (c *Cursor) Parent() ast.Node { return c.parent } + +// Name returns the name of the parent Node field that contains the current Node. +// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns +// the filename for the current Node. +func (c *Cursor) Name() string { return c.name } + +// Index reports the index >= 0 of the current Node in the slice of Nodes that +// contains it, or a value < 0 if the current Node is not part of a slice. +// The index of the current node changes if InsertBefore is called while +// processing the current node. +func (c *Cursor) Index() int { + if c.iter != nil { + return c.iter.index + } + return -1 +} + +// field returns the current node's parent field value. +func (c *Cursor) field() reflect.Value { + return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) +} + +// Replace replaces the current Node with n. +// The replacement node is not walked by Apply. +func (c *Cursor) Replace(n ast.Node) { + if _, ok := c.node.(*ast.File); ok { + file, ok := n.(*ast.File) + if !ok { + panic("attempt to replace *ast.File with non-*ast.File") + } + c.parent.(*ast.Package).Files[c.name] = file + return + } + + v := c.field() + if i := c.Index(); i >= 0 { + v = v.Index(i) + } + v.Set(reflect.ValueOf(n)) +} + +// Delete deletes the current Node from its containing slice. +// If the current Node is not part of a slice, Delete panics. +// As a special case, if the current node is a package file, +// Delete removes it from the package's Files map. 
+func (c *Cursor) Delete() { + if _, ok := c.node.(*ast.File); ok { + delete(c.parent.(*ast.Package).Files, c.name) + return + } + + i := c.Index() + if i < 0 { + panic("Delete node not contained in slice") + } + v := c.field() + l := v.Len() + reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) + v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) + v.SetLen(l - 1) + c.iter.step-- +} + +// InsertAfter inserts n after the current Node in its containing slice. +// If the current Node is not part of a slice, InsertAfter panics. +// Apply does not walk n. +func (c *Cursor) InsertAfter(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertAfter node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) + v.Index(i + 1).Set(reflect.ValueOf(n)) + c.iter.step++ +} + +// InsertBefore inserts n before the current Node in its containing slice. +// If the current Node is not part of a slice, InsertBefore panics. +// Apply will not walk n. +func (c *Cursor) InsertBefore(n ast.Node) { + i := c.Index() + if i < 0 { + panic("InsertBefore node not contained in slice") + } + v := c.field() + v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) + l := v.Len() + reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) + v.Index(i).Set(reflect.ValueOf(n)) + c.iter.index++ +} + +// application carries all the shared data so we can pass it around cheaply. +type application struct { + pre, post ApplyFunc + cursor Cursor + iter iterator +} + +func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { + // convert typed nil into untyped nil + if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { + n = nil + } + + // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead + saved := a.cursor + a.cursor.parent = parent + a.cursor.name = name + a.cursor.iter = iter + a.cursor.node = n + + if a.pre != nil && !a.pre(&a.cursor) { + a.cursor = saved + return + } + + // walk children + // (the order of the cases matches the order of the corresponding node types in go/ast) + switch n := n.(type) { + case nil: + // nothing to do + + // Comments and fields + case *ast.Comment: + // nothing to do + + case *ast.CommentGroup: + if n != nil { + a.applyList(n, "List") + } + + case *ast.Field: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.FieldList: + a.applyList(n, "List") + + // Expressions + case *ast.BadExpr, *ast.Ident, *ast.BasicLit: + // nothing to do + + case *ast.Ellipsis: + a.apply(n, "Elt", nil, n.Elt) + + case *ast.FuncLit: + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + case *ast.CompositeLit: + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Elts") + + case *ast.ParenExpr: + a.apply(n, "X", nil, n.X) + + case *ast.SelectorExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Sel", nil, n.Sel) + + case *ast.IndexExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Index", nil, n.Index) + + case *ast.SliceExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Low", nil, n.Low) + a.apply(n, "High", nil, n.High) + a.apply(n, "Max", nil, n.Max) + + case *ast.TypeAssertExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Type", nil, n.Type) + + case *ast.CallExpr: + a.apply(n, "Fun", nil, n.Fun) + a.applyList(n, "Args") + + case *ast.StarExpr: + a.apply(n, "X", nil, n.X) + + case *ast.UnaryExpr: + a.apply(n, "X", nil, n.X) + + 
case *ast.BinaryExpr: + a.apply(n, "X", nil, n.X) + a.apply(n, "Y", nil, n.Y) + + case *ast.KeyValueExpr: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + // Types + case *ast.ArrayType: + a.apply(n, "Len", nil, n.Len) + a.apply(n, "Elt", nil, n.Elt) + + case *ast.StructType: + a.apply(n, "Fields", nil, n.Fields) + + case *ast.FuncType: + a.apply(n, "Params", nil, n.Params) + a.apply(n, "Results", nil, n.Results) + + case *ast.InterfaceType: + a.apply(n, "Methods", nil, n.Methods) + + case *ast.MapType: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + + case *ast.ChanType: + a.apply(n, "Value", nil, n.Value) + + // Statements + case *ast.BadStmt: + // nothing to do + + case *ast.DeclStmt: + a.apply(n, "Decl", nil, n.Decl) + + case *ast.EmptyStmt: + // nothing to do + + case *ast.LabeledStmt: + a.apply(n, "Label", nil, n.Label) + a.apply(n, "Stmt", nil, n.Stmt) + + case *ast.ExprStmt: + a.apply(n, "X", nil, n.X) + + case *ast.SendStmt: + a.apply(n, "Chan", nil, n.Chan) + a.apply(n, "Value", nil, n.Value) + + case *ast.IncDecStmt: + a.apply(n, "X", nil, n.X) + + case *ast.AssignStmt: + a.applyList(n, "Lhs") + a.applyList(n, "Rhs") + + case *ast.GoStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.DeferStmt: + a.apply(n, "Call", nil, n.Call) + + case *ast.ReturnStmt: + a.applyList(n, "Results") + + case *ast.BranchStmt: + a.apply(n, "Label", nil, n.Label) + + case *ast.BlockStmt: + a.applyList(n, "List") + + case *ast.IfStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Body", nil, n.Body) + a.apply(n, "Else", nil, n.Else) + + case *ast.CaseClause: + a.applyList(n, "List") + a.applyList(n, "Body") + + case *ast.SwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Tag", nil, n.Tag) + a.apply(n, "Body", nil, n.Body) + + case *ast.TypeSwitchStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Assign", nil, n.Assign) + a.apply(n, "Body", nil, n.Body) + + case *ast.CommClause: + a.apply(n, "Comm", nil, n.Comm) + a.applyList(n, "Body") + + case *ast.SelectStmt: + a.apply(n, "Body", nil, n.Body) + + case *ast.ForStmt: + a.apply(n, "Init", nil, n.Init) + a.apply(n, "Cond", nil, n.Cond) + a.apply(n, "Post", nil, n.Post) + a.apply(n, "Body", nil, n.Body) + + case *ast.RangeStmt: + a.apply(n, "Key", nil, n.Key) + a.apply(n, "Value", nil, n.Value) + a.apply(n, "X", nil, n.X) + a.apply(n, "Body", nil, n.Body) + + // Declarations + case *ast.ImportSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Path", nil, n.Path) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.ValueSpec: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Names") + a.apply(n, "Type", nil, n.Type) + a.applyList(n, "Values") + a.apply(n, "Comment", nil, n.Comment) + + case *ast.TypeSpec: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Comment", nil, n.Comment) + + case *ast.BadDecl: + // nothing to do + + case *ast.GenDecl: + a.apply(n, "Doc", nil, n.Doc) + a.applyList(n, "Specs") + + case *ast.FuncDecl: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Recv", nil, n.Recv) + a.apply(n, "Name", nil, n.Name) + a.apply(n, "Type", nil, n.Type) + a.apply(n, "Body", nil, n.Body) + + // Files and packages + case *ast.File: + a.apply(n, "Doc", nil, n.Doc) + a.apply(n, "Name", nil, n.Name) + a.applyList(n, "Decls") + // Don't walk n.Comments; they have either been walked already if + // they are Doc comments, or they can be easily walked explicitly. 
+ + case *ast.Package: + // collect and sort names for reproducible behavior + var names []string + for name := range n.Files { + names = append(names, name) + } + sort.Strings(names) + for _, name := range names { + a.apply(n, name, nil, n.Files[name]) + } + + default: + panic(fmt.Sprintf("Apply: unexpected node type %T", n)) + } + + if a.post != nil && !a.post(&a.cursor) { + panic(abort) + } + + a.cursor = saved +} + +// An iterator controls iteration over a slice of nodes. +type iterator struct { + index, step int +} + +func (a *application) applyList(parent ast.Node, name string) { + // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead + saved := a.iter + a.iter.index = 0 + for { + // must reload parent.name each time, since cursor modifications might change it + v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) + if a.iter.index >= v.Len() { + break + } + + // element x may be nil in a bad AST - be cautious + var x ast.Node + if e := v.Index(a.iter.index); e.IsValid() { + x = e.Interface().(ast.Node) + } + + a.iter.step = 1 + a.apply(parent, name, &a.iter, x) + a.iter.index += a.iter.step + } + a.iter = saved +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go new file mode 100644 index 000000000..1c86970ff --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go @@ -0,0 +1,248 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package astutil_test + +import ( + "bytes" + "go/ast" + "go/format" + "go/parser" + "go/token" + "testing" + + "golang.org/x/tools/go/ast/astutil" +) + +var rewriteTests = [...]struct { + name string + orig, want string + pre, post astutil.ApplyFunc +}{ + {name: "nop", orig: "package p\n", want: "package p\n"}, + + {name: "replace", + orig: `package p + +var x int +`, + want: `package p + +var t T +`, + post: func(c *astutil.Cursor) bool { + if _, ok := c.Node().(*ast.ValueSpec); ok { + c.Replace(valspec("t", "T")) + return false + } + return true + }, + }, + + {name: "set doc strings", + orig: `package p + +const z = 0 + +type T struct{} + +var x int +`, + want: `package p +// a foo is a foo +const z = 0 +// a foo is a foo +type T struct{} +// a foo is a foo +var x int +`, + post: func(c *astutil.Cursor) bool { + if _, ok := c.Parent().(*ast.GenDecl); ok && c.Name() == "Doc" && c.Node() == nil { + c.Replace(&ast.CommentGroup{List: []*ast.Comment{{Text: "// a foo is a foo"}}}) + } + return true + }, + }, + + {name: "insert names", + orig: `package p + +const a = 1 +`, + want: `package p + +const a, b, c = 1, 2, 3 +`, + pre: func(c *astutil.Cursor) bool { + if _, ok := c.Parent().(*ast.ValueSpec); ok { + switch c.Name() { + case "Names": + c.InsertAfter(ast.NewIdent("c")) + c.InsertAfter(ast.NewIdent("b")) + case "Values": + c.InsertAfter(&ast.BasicLit{Kind: token.INT, Value: "3"}) + c.InsertAfter(&ast.BasicLit{Kind: token.INT, Value: "2"}) + } + } + return true + }, + }, + + {name: "insert", + orig: `package p + +var ( + x int + y int +) +`, + want: `package p + +var before1 int +var before2 int + +var ( + x int + y int +) +var after2 int +var after1 int +`, + pre: func(c *astutil.Cursor) bool { + if _, ok := c.Node().(*ast.GenDecl); ok { + c.InsertBefore(vardecl("before1", "int")) + c.InsertAfter(vardecl("after1", "int")) + c.InsertAfter(vardecl("after2", "int")) + c.InsertBefore(vardecl("before2", 
"int")) + } + return true + }, + }, + + {name: "delete", + orig: `package p + +var x int +var y int +var z int +`, + want: `package p + +var y int +var z int +`, + pre: func(c *astutil.Cursor) bool { + n := c.Node() + if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" { + c.Delete() + } + return true + }, + }, + + {name: "insertafter-delete", + orig: `package p + +var x int +var y int +var z int +`, + want: `package p + +var x1 int + +var y int +var z int +`, + pre: func(c *astutil.Cursor) bool { + n := c.Node() + if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" { + c.InsertAfter(vardecl("x1", "int")) + c.Delete() + } + return true + }, + }, + + {name: "delete-insertafter", + orig: `package p + +var x int +var y int +var z int +`, + want: `package p + +var y int +var x1 int +var z int +`, + pre: func(c *astutil.Cursor) bool { + n := c.Node() + if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" { + c.Delete() + // The cursor is now effectively atop the 'var y int' node. + c.InsertAfter(vardecl("x1", "int")) + } + return true + }, + }, +} + +func valspec(name, typ string) *ast.ValueSpec { + return &ast.ValueSpec{Names: []*ast.Ident{ast.NewIdent(name)}, + Type: ast.NewIdent(typ), + } +} + +func vardecl(name, typ string) *ast.GenDecl { + return &ast.GenDecl{ + Tok: token.VAR, + Specs: []ast.Spec{valspec(name, typ)}, + } +} + +func TestRewrite(t *testing.T) { + t.Run("*", func(t *testing.T) { + for _, test := range rewriteTests { + test := test + t.Run(test.name, func(t *testing.T) { + t.Parallel() + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, test.name, test.orig, parser.ParseComments) + if err != nil { + t.Fatal(err) + } + n := astutil.Apply(f, test.pre, test.post) + var buf bytes.Buffer + if err := format.Node(&buf, fset, n); err != nil { + t.Fatal(err) + } + got := buf.String() + if got != test.want { + t.Errorf("got:\n\n%s\nwant:\n\n%s\n", got, test.want) + } + }) + } + }) +} + +var sink ast.Node + +func BenchmarkRewrite(b *testing.B) { + for _, test := range rewriteTests { + b.Run(test.name, func(b *testing.B) { + for i := 0; i < b.N; i++ { + b.StopTimer() + fset := token.NewFileSet() + f, err := parser.ParseFile(fset, test.name, test.orig, parser.ParseComments) + if err != nil { + b.Fatal(err) + } + b.StartTimer() + sink = astutil.Apply(f, test.pre, test.post) + } + }) + } +} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go new file mode 100644 index 000000000..763062982 --- /dev/null +++ b/vendor/golang.org/x/tools/go/ast/astutil/util.go @@ -0,0 +1,14 @@ +package astutil + +import "go/ast" + +// Unparen returns e with any enclosing parentheses stripped. +func Unparen(e ast.Expr) ast.Expr { + for { + p, ok := e.(*ast.ParenExpr) + if !ok { + return e + } + e = p.X + } +} From 979839eb5a22ca71c1e0188f2621b1ae05954a3c Mon Sep 17 00:00:00 2001 From: Christophe Kamphaus Date: Sat, 28 Apr 2018 03:06:34 +0200 Subject: [PATCH 3/8] Improve handling of label clause All statements of Label children are merged into a block statement. Similar handling for block & fallthrough as for case clauses. 
--- tests/switch.c | 6 ++- transpiler/switch.go | 87 +++++++++++++++++++++++++++++++++++--------- 2 files changed, 75 insertions(+), 18 deletions(-) diff --git a/tests/switch.c b/tests/switch.c index ebbec3a67..5a6a1e86f 100644 --- a/tests/switch.c +++ b/tests/switch.c @@ -148,6 +148,7 @@ void goto_label(bool use_goto) LABELY: case 4: LABEL: + printf("x"); case 1: pass(__func__); break; @@ -341,7 +342,10 @@ void scoped_goto_label(bool use_goto) } LABELY: {} case 4: {} - LABEL: {} + LABEL: + { + printf("x"); + } case 1: { pass(__func__); diff --git a/transpiler/switch.go b/transpiler/switch.go index eb5eac50f..b442999f3 100644 --- a/transpiler/switch.go +++ b/transpiler/switch.go @@ -319,9 +319,21 @@ func appendCaseOrDefaultToNormalizedCases(cases []goast.Stmt, }) } if ls, ok := cases[len(cases)-1].(*goast.LabeledStmt); ok { - ls.Stmt = &goast.BranchStmt{ + ft := &goast.BranchStmt{ Tok: token.FALLTHROUGH, } + if _, ok2 := ls.Stmt.(*goast.EmptyStmt); ok2 { + ls.Stmt = ft + } else if bs, ok2 := ls.Stmt.(*goast.BlockStmt); ok2 { + bs.List = append(bs.List, ft) + } else { + ls.Stmt = &goast.BlockStmt{ + List: []goast.Stmt{ + ls.Stmt, + ft, + }, + } + } } } caseEndedWithBreak = false @@ -340,6 +352,18 @@ func appendCaseOrDefaultToNormalizedCases(cases []goast.Stmt, case *ast.LabelStmt: singleCase, newPre, newPost, err = transpileLabelStmt(c, p) + lc, ok := singleCase.(*goast.LabeledStmt) + if !ok { + panic("expected *goast.LabeledStmt") + } + if len(newPost) == 1 { + lc.Stmt = newPost[0] + } else if len(newPost) > 1 { + lc.Stmt = &goast.BlockStmt{ + List: newPost, + } + } + newPost = []goast.Stmt{} } if singleCase != nil { @@ -433,6 +457,21 @@ func handleLabelCases(cases []goast.Stmt, p *program.Program) (newCases []goast. Label: util.NewIdent(swEndLabel), Tok: token.GOTO, }) + funcTransformBreak := func(cursor *astutil.Cursor) bool { + if cursor == nil { + return true + } + node := cursor.Node() + if bs, ok := node.(*goast.BranchStmt); ok { + if bs.Tok == token.BREAK { + cursor.Replace(&goast.BranchStmt{ + Label: util.NewIdent(swEndLabel), + Tok: token.GOTO, + }) + } + } + return true + } for i, x := range cases { switch c := x.(type) { case *goast.CaseClause: @@ -454,21 +493,7 @@ func handleLabelCases(cases []goast.Stmt, p *program.Program) (newCases []goast. } // Replace break's with goto swEndLabel - astutil.Apply(c, nil, func(cursor *astutil.Cursor) bool { - if cursor == nil { - return true - } - node := cursor.Node() - if bs, ok := node.(*goast.BranchStmt); ok { - if bs.Tok == token.BREAK { - cursor.Replace(&goast.BranchStmt{ - Label: util.NewIdent(swEndLabel), - Tok: token.GOTO, - }) - } - } - return true - }) + astutil.Apply(c, nil, funcTransformBreak) body := c.Body // append caseLabel label followed by case body @@ -496,8 +521,36 @@ func handleLabelCases(cases []goast.Stmt, p *program.Program) (newCases []goast. 
} newCases = append(newCases, c) case *goast.LabeledStmt: - c.Stmt = &goast.EmptyStmt{} + var isFallThrough bool + // Remove fallthrough if it's the only statement + if v, ok := c.Stmt.(*goast.BranchStmt); ok { + if v.Tok == token.FALLTHROUGH { + c.Stmt = &goast.EmptyStmt{} + isFallThrough = true + } + } else if b, ok := c.Stmt.(*goast.BlockStmt); ok { + // Remove fallthrough if LabeledStmt contains a BlockStmt + if v, ok := b.List[len(b.List)-1].(*goast.BranchStmt); ok { + if v.Tok == token.FALLTHROUGH { + b.List = b.List[:len(b.List)-1] + isFallThrough = true + } + } + } + + // Replace break's with goto swEndLabel + astutil.Apply(c, nil, funcTransformBreak) + + // append label followed by label body postStmts = append(postStmts, c) + + // If not last case && no fallthrough goto swEndLabel + if i != len(cases)-1 && !isFallThrough { + postStmts = append(postStmts, &goast.BranchStmt{ + Label: util.NewIdent(swEndLabel), + Tok: token.GOTO, + }) + } } } postStmts = append(postStmts, &goast.LabeledStmt{ From fd72d772d8162e3adec250926c79ccd3e531934a Mon Sep 17 00:00:00 2001 From: Christophe Kamphaus Date: Sat, 28 Apr 2018 22:01:22 +0200 Subject: [PATCH 4/8] Fix panic if Switch body contains NullStmt after LabelStmt --- transpiler/switch.go | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/transpiler/switch.go b/transpiler/switch.go index b442999f3..dfd1acb45 100644 --- a/transpiler/switch.go +++ b/transpiler/switch.go @@ -52,8 +52,8 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) ( cs, ok1 := cn.(*ast.CaseStmt) ds, ok2 := cn.(*ast.DefaultStmt) ls, ok3 := cn.(*ast.LabelStmt) - if !ok1 && !ok2 && !ok3 { - // Do not consider a node which is not a case or default statement here + if !ok1 && !ok2 && !ok3 || cn == nil || len(cn.Children()) == 0 { + // Do not consider a node which is not a case, label or default statement here continue } lastCn := cn.Children()[len(cn.Children())-1] @@ -103,6 +103,14 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) ( v.Children()[len(v.Children())-1] = &compoundStmt } } + // For simplification - each LabelStmt will have CompoundStmt + if v, ok := body.Children()[i].(*ast.LabelStmt); ok { + if _, ok := v.Children()[len(v.Children())-1].(*ast.CompoundStmt); !ok { + var compoundStmt ast.CompoundStmt + compoundStmt.AddChild(v.Children()[len(v.Children())-1]) + v.Children()[len(v.Children())-1] = &compoundStmt + } + } } hasLabelCase := false From 10adffdb07e9ab4d648134d1d8306a51cdafbaf5 Mon Sep 17 00:00:00 2001 From: Christophe Kamphaus Date: Mon, 30 Apr 2018 20:55:44 +0200 Subject: [PATCH 5/8] Fix goto SW_GENERATED_LABEL_x jumps over declaration of variable --- tests/code_quality/for.expected.c | 2 +- tests/code_quality/if.expected.c | 2 +- tests/code_quality/operators.expected.c | 2 +- tests/code_quality/switch.expected.c | 6 +++++- tests/switch.c | 7 +++++-- transpiler/switch.go | 3 ++- 6 files changed, 15 insertions(+), 7 deletions(-) diff --git a/tests/code_quality/for.expected.c b/tests/code_quality/for.expected.c index e47929222..5f6808746 100644 --- a/tests/code_quality/for.expected.c +++ b/tests/code_quality/for.expected.c @@ -1,5 +1,5 @@ /* - Package main - transpiled by c2go version: v0.22.4 Aluminium 2018-04-24 + Package main - transpiled by c2go version: v0.23.0 Berkelium 2018-04-27 If you have found any issues, please raise an issue at: https://github.com/elliotchance/c2go/ diff --git a/tests/code_quality/if.expected.c b/tests/code_quality/if.expected.c index 47c64a923..76be90723 100644 
--- a/tests/code_quality/if.expected.c +++ b/tests/code_quality/if.expected.c @@ -1,5 +1,5 @@ /* - Package main - transpiled by c2go version: v0.22.4 Aluminium 2018-04-24 + Package main - transpiled by c2go version: v0.23.0 Berkelium 2018-04-27 If you have found any issues, please raise an issue at: https://github.com/elliotchance/c2go/ diff --git a/tests/code_quality/operators.expected.c b/tests/code_quality/operators.expected.c index 84520a95c..4e57f2c37 100644 --- a/tests/code_quality/operators.expected.c +++ b/tests/code_quality/operators.expected.c @@ -1,5 +1,5 @@ /* - Package main - transpiled by c2go version: v0.22.4 Aluminium 2018-04-24 + Package main - transpiled by c2go version: v0.23.0 Berkelium 2018-04-27 If you have found any issues, please raise an issue at: https://github.com/elliotchance/c2go/ diff --git a/tests/code_quality/switch.expected.c b/tests/code_quality/switch.expected.c index df8d9295b..e8a0dc18a 100644 --- a/tests/code_quality/switch.expected.c +++ b/tests/code_quality/switch.expected.c @@ -1,5 +1,5 @@ /* - Package main - transpiled by c2go version: v0.22.4 Aluminium 2018-04-24 + Package main - transpiled by c2go version: v0.23.0 Berkelium 2018-04-27 If you have found any issues, please raise an issue at: https://github.com/elliotchance/c2go/ @@ -28,7 +28,11 @@ func switch_function() { return } case int32(4): + { + } case int32(5): + { + } case int32(6): fallthrough case int32(7): diff --git a/tests/switch.c b/tests/switch.c index 5a6a1e86f..5d3289410 100644 --- a/tests/switch.c +++ b/tests/switch.c @@ -316,7 +316,6 @@ void scoped_fallthrough_several_midway_default() } } - void scoped_goto_label(bool use_goto) { for (;;) { @@ -352,7 +351,11 @@ void scoped_goto_label(bool use_goto) break; } case 2: - {} + { + int x = 0; + printf("%d", x); + break; + } LABELX: {} default: { diff --git a/transpiler/switch.go b/transpiler/switch.go index dfd1acb45..752cbc395 100644 --- a/transpiler/switch.go +++ b/transpiler/switch.go @@ -196,7 +196,8 @@ func transpileSwitchStmt(n *ast.SwitchStmt, p *program.Program) ( if vv, ok := v.List[len(v.List)-1].(*goast.BranchStmt); ok { if vv.Tok == token.BREAK { if isFallThrough { - cs.Body = append(v.List[:len(v.List)-1]) + v.List = v.List[:len(v.List)-1] + cs.Body = body[:len(body)-1] continue } } From 9fec47fc504f4d54a09475ea0ffb67e4bbd4904f Mon Sep 17 00:00:00 2001 From: Christophe Kamphaus Date: Fri, 25 May 2018 16:01:14 +0200 Subject: [PATCH 6/8] Add test for nested for loop with break --- tests/switch.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/switch.c b/tests/switch.c index 5d3289410..ae7cf1e2f 100644 --- a/tests/switch.c +++ b/tests/switch.c @@ -135,6 +135,8 @@ void goto_label(bool use_goto) continue; case 0: if (use_goto) { + for (;;) + break; goto LABEL; fail("code should not reach here"); } else if (false) { @@ -328,6 +330,9 @@ void scoped_goto_label(bool use_goto) case 0: { if (use_goto) { + for (;;) { + break; + } goto LABEL; fail("code should not reach here"); } else if (false) { From 76f756679de46a0e8a2c900bdb75a405ef8c3f05 Mon Sep 17 00:00:00 2001 From: Christophe Kamphaus Date: Fri, 25 May 2018 16:03:35 +0200 Subject: [PATCH 7/8] Fix failing test: do not replace nested breaks --- transpiler/switch.go | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) diff --git a/transpiler/switch.go b/transpiler/switch.go index 752cbc395..85ce881e0 100644 --- a/transpiler/switch.go +++ b/transpiler/switch.go @@ -479,6 +479,21 @@ func handleLabelCases(cases []goast.Stmt, p 
*program.Program) (newCases []goast. }) } } + if _, ok := node.(*goast.ForStmt); ok { + return false + } + if _, ok := node.(*goast.RangeStmt); ok { + return false + } + if _, ok := node.(*goast.SwitchStmt); ok { + return false + } + if _, ok := node.(*goast.TypeSwitchStmt); ok { + return false + } + if _, ok := node.(*goast.SelectStmt); ok { + return false + } return true } for i, x := range cases { @@ -502,7 +517,7 @@ func handleLabelCases(cases []goast.Stmt, p *program.Program) (newCases []goast. } // Replace break's with goto swEndLabel - astutil.Apply(c, nil, funcTransformBreak) + astutil.Apply(c, funcTransformBreak, nil) body := c.Body // append caseLabel label followed by case body @@ -548,7 +563,7 @@ func handleLabelCases(cases []goast.Stmt, p *program.Program) (newCases []goast. } // Replace break's with goto swEndLabel - astutil.Apply(c, nil, funcTransformBreak) + astutil.Apply(c, funcTransformBreak, nil) // append label followed by label body postStmts = append(postStmts, c) From 0f6a60b9fd167654926aac166c383a6294a9cdcd Mon Sep 17 00:00:00 2001 From: Christophe Kamphaus Date: Mon, 4 Jun 2018 09:32:38 +0200 Subject: [PATCH 8/8] Remove vendor/ folder --- .../x/tools/go/ast/astutil/enclosing.go | 627 ------ .../x/tools/go/ast/astutil/enclosing_test.go | 195 -- .../x/tools/go/ast/astutil/imports.go | 470 ----- .../x/tools/go/ast/astutil/imports_test.go | 1818 ----------------- .../x/tools/go/ast/astutil/rewrite.go | 477 ----- .../x/tools/go/ast/astutil/rewrite_test.go | 248 --- .../golang.org/x/tools/go/ast/astutil/util.go | 14 - 7 files changed, 3849 deletions(-) delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/imports_test.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go delete mode 100644 vendor/golang.org/x/tools/go/ast/astutil/util.go diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go deleted file mode 100644 index 6b7052b89..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing.go +++ /dev/null @@ -1,627 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil - -// This file defines utilities for working with source positions. - -import ( - "fmt" - "go/ast" - "go/token" - "sort" -) - -// PathEnclosingInterval returns the node that encloses the source -// interval [start, end), and all its ancestors up to the AST root. -// -// The definition of "enclosing" used by this function considers -// additional whitespace abutting a node to be enclosed by it. -// In this example: -// -// z := x + y // add them -// <-A-> -// <----B-----> -// -// the ast.BinaryExpr(+) node is considered to enclose interval B -// even though its [Pos()..End()) is actually only interval A. -// This behaviour makes user interfaces more tolerant of imperfect -// input. -// -// This function treats tokens as nodes, though they are not included -// in the result. e.g. PathEnclosingInterval("+") returns the -// enclosing ast.BinaryExpr("x + y"). -// -// If start==end, the 1-char interval following start is used instead. 
-// -// The 'exact' result is true if the interval contains only path[0] -// and perhaps some adjacent whitespace. It is false if the interval -// overlaps multiple children of path[0], or if it contains only -// interior whitespace of path[0]. -// In this example: -// -// z := x + y // add them -// <--C--> <---E--> -// ^ -// D -// -// intervals C, D and E are inexact. C is contained by the -// z-assignment statement, because it spans three of its children (:=, -// x, +). So too is the 1-char interval D, because it contains only -// interior whitespace of the assignment. E is considered interior -// whitespace of the BlockStmt containing the assignment. -// -// Precondition: [start, end) both lie within the same file as root. -// TODO(adonovan): return (nil, false) in this case and remove precond. -// Requires FileSet; see loader.tokenFileContainsPos. -// -// Postcondition: path is never nil; it always contains at least 'root'. -// -func PathEnclosingInterval(root *ast.File, start, end token.Pos) (path []ast.Node, exact bool) { - // fmt.Printf("EnclosingInterval %d %d\n", start, end) // debugging - - // Precondition: node.[Pos..End) and adjoining whitespace contain [start, end). - var visit func(node ast.Node) bool - visit = func(node ast.Node) bool { - path = append(path, node) - - nodePos := node.Pos() - nodeEnd := node.End() - - // fmt.Printf("visit(%T, %d, %d)\n", node, nodePos, nodeEnd) // debugging - - // Intersect [start, end) with interval of node. - if start < nodePos { - start = nodePos - } - if end > nodeEnd { - end = nodeEnd - } - - // Find sole child that contains [start, end). - children := childrenOf(node) - l := len(children) - for i, child := range children { - // [childPos, childEnd) is unaugmented interval of child. - childPos := child.Pos() - childEnd := child.End() - - // [augPos, augEnd) is whitespace-augmented interval of child. - augPos := childPos - augEnd := childEnd - if i > 0 { - augPos = children[i-1].End() // start of preceding whitespace - } - if i < l-1 { - nextChildPos := children[i+1].Pos() - // Does [start, end) lie between child and next child? - if start >= augEnd && end <= nextChildPos { - return false // inexact match - } - augEnd = nextChildPos // end of following whitespace - } - - // fmt.Printf("\tchild %d: [%d..%d)\tcontains interval [%d..%d)?\n", - // i, augPos, augEnd, start, end) // debugging - - // Does augmented child strictly contain [start, end)? - if augPos <= start && end <= augEnd { - _, isToken := child.(tokenNode) - return isToken || visit(child) - } - - // Does [start, end) overlap multiple children? - // i.e. left-augmented child contains start - // but LR-augmented child does not contain end. - if start < childEnd && end > augEnd { - break - } - } - - // No single child contained [start, end), - // so node is the result. Is it exact? - - // (It's tempting to put this condition before the - // child loop, but it gives the wrong result in the - // case where a node (e.g. ExprStmt) and its sole - // child have equal intervals.) 
- if start == nodePos && end == nodeEnd { - return true // exact match - } - - return false // inexact: overlaps multiple children - } - - if start > end { - start, end = end, start - } - - if start < root.End() && end > root.Pos() { - if start == end { - end = start + 1 // empty interval => interval of size 1 - } - exact = visit(root) - - // Reverse the path: - for i, l := 0, len(path); i < l/2; i++ { - path[i], path[l-1-i] = path[l-1-i], path[i] - } - } else { - // Selection lies within whitespace preceding the - // first (or following the last) declaration in the file. - // The result nonetheless always includes the ast.File. - path = append(path, root) - } - - return -} - -// tokenNode is a dummy implementation of ast.Node for a single token. -// They are used transiently by PathEnclosingInterval but never escape -// this package. -// -type tokenNode struct { - pos token.Pos - end token.Pos -} - -func (n tokenNode) Pos() token.Pos { - return n.pos -} - -func (n tokenNode) End() token.Pos { - return n.end -} - -func tok(pos token.Pos, len int) ast.Node { - return tokenNode{pos, pos + token.Pos(len)} -} - -// childrenOf returns the direct non-nil children of ast.Node n. -// It may include fake ast.Node implementations for bare tokens. -// it is not safe to call (e.g.) ast.Walk on such nodes. -// -func childrenOf(n ast.Node) []ast.Node { - var children []ast.Node - - // First add nodes for all true subtrees. - ast.Inspect(n, func(node ast.Node) bool { - if node == n { // push n - return true // recur - } - if node != nil { // push child - children = append(children, node) - } - return false // no recursion - }) - - // Then add fake Nodes for bare tokens. - switch n := n.(type) { - case *ast.ArrayType: - children = append(children, - tok(n.Lbrack, len("[")), - tok(n.Elt.End(), len("]"))) - - case *ast.AssignStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.BasicLit: - children = append(children, - tok(n.ValuePos, len(n.Value))) - - case *ast.BinaryExpr: - children = append(children, tok(n.OpPos, len(n.Op.String()))) - - case *ast.BlockStmt: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("}"))) - - case *ast.BranchStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.CallExpr: - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - if n.Ellipsis != 0 { - children = append(children, tok(n.Ellipsis, len("..."))) - } - - case *ast.CaseClause: - if n.List == nil { - children = append(children, - tok(n.Case, len("default"))) - } else { - children = append(children, - tok(n.Case, len("case"))) - } - children = append(children, tok(n.Colon, len(":"))) - - case *ast.ChanType: - switch n.Dir { - case ast.RECV: - children = append(children, tok(n.Begin, len("<-chan"))) - case ast.SEND: - children = append(children, tok(n.Begin, len("chan<-"))) - case ast.RECV | ast.SEND: - children = append(children, tok(n.Begin, len("chan"))) - } - - case *ast.CommClause: - if n.Comm == nil { - children = append(children, - tok(n.Case, len("default"))) - } else { - children = append(children, - tok(n.Case, len("case"))) - } - children = append(children, tok(n.Colon, len(":"))) - - case *ast.Comment: - // nop - - case *ast.CommentGroup: - // nop - - case *ast.CompositeLit: - children = append(children, - tok(n.Lbrace, len("{")), - tok(n.Rbrace, len("{"))) - - case *ast.DeclStmt: - // nop - - case *ast.DeferStmt: - children = append(children, - tok(n.Defer, len("defer"))) - - case 
*ast.Ellipsis: - children = append(children, - tok(n.Ellipsis, len("..."))) - - case *ast.EmptyStmt: - // nop - - case *ast.ExprStmt: - // nop - - case *ast.Field: - // TODO(adonovan): Field.{Doc,Comment,Tag}? - - case *ast.FieldList: - children = append(children, - tok(n.Opening, len("(")), - tok(n.Closing, len(")"))) - - case *ast.File: - // TODO test: Doc - children = append(children, - tok(n.Package, len("package"))) - - case *ast.ForStmt: - children = append(children, - tok(n.For, len("for"))) - - case *ast.FuncDecl: - // TODO(adonovan): FuncDecl.Comment? - - // Uniquely, FuncDecl breaks the invariant that - // preorder traversal yields tokens in lexical order: - // in fact, FuncDecl.Recv precedes FuncDecl.Type.Func. - // - // As a workaround, we inline the case for FuncType - // here and order things correctly. - // - children = nil // discard ast.Walk(FuncDecl) info subtrees - children = append(children, tok(n.Type.Func, len("func"))) - if n.Recv != nil { - children = append(children, n.Recv) - } - children = append(children, n.Name) - if n.Type.Params != nil { - children = append(children, n.Type.Params) - } - if n.Type.Results != nil { - children = append(children, n.Type.Results) - } - if n.Body != nil { - children = append(children, n.Body) - } - - case *ast.FuncLit: - // nop - - case *ast.FuncType: - if n.Func != 0 { - children = append(children, - tok(n.Func, len("func"))) - } - - case *ast.GenDecl: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - if n.Lparen != 0 { - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - } - - case *ast.GoStmt: - children = append(children, - tok(n.Go, len("go"))) - - case *ast.Ident: - children = append(children, - tok(n.NamePos, len(n.Name))) - - case *ast.IfStmt: - children = append(children, - tok(n.If, len("if"))) - - case *ast.ImportSpec: - // TODO(adonovan): ImportSpec.{Doc,EndPos}? 
- - case *ast.IncDecStmt: - children = append(children, - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.IndexExpr: - children = append(children, - tok(n.Lbrack, len("{")), - tok(n.Rbrack, len("}"))) - - case *ast.InterfaceType: - children = append(children, - tok(n.Interface, len("interface"))) - - case *ast.KeyValueExpr: - children = append(children, - tok(n.Colon, len(":"))) - - case *ast.LabeledStmt: - children = append(children, - tok(n.Colon, len(":"))) - - case *ast.MapType: - children = append(children, - tok(n.Map, len("map"))) - - case *ast.ParenExpr: - children = append(children, - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - - case *ast.RangeStmt: - children = append(children, - tok(n.For, len("for")), - tok(n.TokPos, len(n.Tok.String()))) - - case *ast.ReturnStmt: - children = append(children, - tok(n.Return, len("return"))) - - case *ast.SelectStmt: - children = append(children, - tok(n.Select, len("select"))) - - case *ast.SelectorExpr: - // nop - - case *ast.SendStmt: - children = append(children, - tok(n.Arrow, len("<-"))) - - case *ast.SliceExpr: - children = append(children, - tok(n.Lbrack, len("[")), - tok(n.Rbrack, len("]"))) - - case *ast.StarExpr: - children = append(children, tok(n.Star, len("*"))) - - case *ast.StructType: - children = append(children, tok(n.Struct, len("struct"))) - - case *ast.SwitchStmt: - children = append(children, tok(n.Switch, len("switch"))) - - case *ast.TypeAssertExpr: - children = append(children, - tok(n.Lparen-1, len(".")), - tok(n.Lparen, len("(")), - tok(n.Rparen, len(")"))) - - case *ast.TypeSpec: - // TODO(adonovan): TypeSpec.{Doc,Comment}? - - case *ast.TypeSwitchStmt: - children = append(children, tok(n.Switch, len("switch"))) - - case *ast.UnaryExpr: - children = append(children, tok(n.OpPos, len(n.Op.String()))) - - case *ast.ValueSpec: - // TODO(adonovan): ValueSpec.{Doc,Comment}? - - case *ast.BadDecl, *ast.BadExpr, *ast.BadStmt: - // nop - } - - // TODO(adonovan): opt: merge the logic of ast.Inspect() into - // the switch above so we can make interleaved callbacks for - // both Nodes and Tokens in the right order and avoid the need - // to sort. - sort.Sort(byPos(children)) - - return children -} - -type byPos []ast.Node - -func (sl byPos) Len() int { - return len(sl) -} -func (sl byPos) Less(i, j int) bool { - return sl[i].Pos() < sl[j].Pos() -} -func (sl byPos) Swap(i, j int) { - sl[i], sl[j] = sl[j], sl[i] -} - -// NodeDescription returns a description of the concrete type of n suitable -// for a user interface. -// -// TODO(adonovan): in some cases (e.g. Field, FieldList, Ident, -// StarExpr) we could be much more specific given the path to the AST -// root. Perhaps we should do that. 
-// -func NodeDescription(n ast.Node) string { - switch n := n.(type) { - case *ast.ArrayType: - return "array type" - case *ast.AssignStmt: - return "assignment" - case *ast.BadDecl: - return "bad declaration" - case *ast.BadExpr: - return "bad expression" - case *ast.BadStmt: - return "bad statement" - case *ast.BasicLit: - return "basic literal" - case *ast.BinaryExpr: - return fmt.Sprintf("binary %s operation", n.Op) - case *ast.BlockStmt: - return "block" - case *ast.BranchStmt: - switch n.Tok { - case token.BREAK: - return "break statement" - case token.CONTINUE: - return "continue statement" - case token.GOTO: - return "goto statement" - case token.FALLTHROUGH: - return "fall-through statement" - } - case *ast.CallExpr: - if len(n.Args) == 1 && !n.Ellipsis.IsValid() { - return "function call (or conversion)" - } - return "function call" - case *ast.CaseClause: - return "case clause" - case *ast.ChanType: - return "channel type" - case *ast.CommClause: - return "communication clause" - case *ast.Comment: - return "comment" - case *ast.CommentGroup: - return "comment group" - case *ast.CompositeLit: - return "composite literal" - case *ast.DeclStmt: - return NodeDescription(n.Decl) + " statement" - case *ast.DeferStmt: - return "defer statement" - case *ast.Ellipsis: - return "ellipsis" - case *ast.EmptyStmt: - return "empty statement" - case *ast.ExprStmt: - return "expression statement" - case *ast.Field: - // Can be any of these: - // struct {x, y int} -- struct field(s) - // struct {T} -- anon struct field - // interface {I} -- interface embedding - // interface {f()} -- interface method - // func (A) func(B) C -- receiver, param(s), result(s) - return "field/method/parameter" - case *ast.FieldList: - return "field/method/parameter list" - case *ast.File: - return "source file" - case *ast.ForStmt: - return "for loop" - case *ast.FuncDecl: - return "function declaration" - case *ast.FuncLit: - return "function literal" - case *ast.FuncType: - return "function type" - case *ast.GenDecl: - switch n.Tok { - case token.IMPORT: - return "import declaration" - case token.CONST: - return "constant declaration" - case token.TYPE: - return "type declaration" - case token.VAR: - return "variable declaration" - } - case *ast.GoStmt: - return "go statement" - case *ast.Ident: - return "identifier" - case *ast.IfStmt: - return "if statement" - case *ast.ImportSpec: - return "import specification" - case *ast.IncDecStmt: - if n.Tok == token.INC { - return "increment statement" - } - return "decrement statement" - case *ast.IndexExpr: - return "index expression" - case *ast.InterfaceType: - return "interface type" - case *ast.KeyValueExpr: - return "key/value association" - case *ast.LabeledStmt: - return "statement label" - case *ast.MapType: - return "map type" - case *ast.Package: - return "package" - case *ast.ParenExpr: - return "parenthesized " + NodeDescription(n.X) - case *ast.RangeStmt: - return "range loop" - case *ast.ReturnStmt: - return "return statement" - case *ast.SelectStmt: - return "select statement" - case *ast.SelectorExpr: - return "selector" - case *ast.SendStmt: - return "channel send" - case *ast.SliceExpr: - return "slice expression" - case *ast.StarExpr: - return "*-operation" // load/store expr or pointer type - case *ast.StructType: - return "struct type" - case *ast.SwitchStmt: - return "switch statement" - case *ast.TypeAssertExpr: - return "type assertion" - case *ast.TypeSpec: - return "type specification" - case *ast.TypeSwitchStmt: - return "type switch" - case 
*ast.UnaryExpr: - return fmt.Sprintf("unary %s operation", n.Op) - case *ast.ValueSpec: - return "value specification" - - } - panic(fmt.Sprintf("unexpected node type: %T", n)) -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go b/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go deleted file mode 100644 index 107f87c55..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/enclosing_test.go +++ /dev/null @@ -1,195 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil_test - -// This file defines tests of PathEnclosingInterval. - -// TODO(adonovan): exhaustive tests that run over the whole input -// tree, not just handcrafted examples. - -import ( - "bytes" - "fmt" - "go/ast" - "go/parser" - "go/token" - "strings" - "testing" - - "golang.org/x/tools/go/ast/astutil" -) - -// pathToString returns a string containing the concrete types of the -// nodes in path. -func pathToString(path []ast.Node) string { - var buf bytes.Buffer - fmt.Fprint(&buf, "[") - for i, n := range path { - if i > 0 { - fmt.Fprint(&buf, " ") - } - fmt.Fprint(&buf, strings.TrimPrefix(fmt.Sprintf("%T", n), "*ast.")) - } - fmt.Fprint(&buf, "]") - return buf.String() -} - -// findInterval parses input and returns the [start, end) positions of -// the first occurrence of substr in input. f==nil indicates failure; -// an error has already been reported in that case. -// -func findInterval(t *testing.T, fset *token.FileSet, input, substr string) (f *ast.File, start, end token.Pos) { - f, err := parser.ParseFile(fset, "", input, 0) - if err != nil { - t.Errorf("parse error: %s", err) - return - } - - i := strings.Index(input, substr) - if i < 0 { - t.Errorf("%q is not a substring of input", substr) - f = nil - return - } - - filePos := fset.File(f.Package) - return f, filePos.Pos(i), filePos.Pos(i + len(substr)) -} - -// Common input for following tests. -const input = ` -// Hello. -package main -import "fmt" -func f() {} -func main() { - z := (x + y) // add them - f() // NB: ExprStmt and its CallExpr have same Pos/End -} -` - -func TestPathEnclosingInterval_Exact(t *testing.T) { - // For the exact tests, we check that a substring is mapped to - // the canonical string for the node it denotes. 
- tests := []struct { - substr string // first occurrence of this string indicates interval - node string // complete text of expected containing node - }{ - {"package", - input[11 : len(input)-1]}, - {"\npack", - input[11 : len(input)-1]}, - {"main", - "main"}, - {"import", - "import \"fmt\""}, - {"\"fmt\"", - "\"fmt\""}, - {"\nfunc f() {}\n", - "func f() {}"}, - {"x ", - "x"}, - {" y", - "y"}, - {"z", - "z"}, - {" + ", - "x + y"}, - {" :=", - "z := (x + y)"}, - {"x + y", - "x + y"}, - {"(x + y)", - "(x + y)"}, - {" (x + y) ", - "(x + y)"}, - {" (x + y) // add", - "(x + y)"}, - {"func", - "func f() {}"}, - {"func f() {}", - "func f() {}"}, - {"\nfun", - "func f() {}"}, - {" f", - "f"}, - } - for _, test := range tests { - f, start, end := findInterval(t, new(token.FileSet), input, test.substr) - if f == nil { - continue - } - - path, exact := astutil.PathEnclosingInterval(f, start, end) - if !exact { - t.Errorf("PathEnclosingInterval(%q) not exact", test.substr) - continue - } - - if len(path) == 0 { - if test.node != "" { - t.Errorf("PathEnclosingInterval(%q).path: got [], want %q", - test.substr, test.node) - } - continue - } - - if got := input[path[0].Pos():path[0].End()]; got != test.node { - t.Errorf("PathEnclosingInterval(%q): got %q, want %q (path was %s)", - test.substr, got, test.node, pathToString(path)) - continue - } - } -} - -func TestPathEnclosingInterval_Paths(t *testing.T) { - // For these tests, we check only the path of the enclosing - // node, but not its complete text because it's often quite - // large when !exact. - tests := []struct { - substr string // first occurrence of this string indicates interval - path string // the pathToString(),exact of the expected path - }{ - {"// add", - "[BlockStmt FuncDecl File],false"}, - {"(x + y", - "[ParenExpr AssignStmt BlockStmt FuncDecl File],false"}, - {"x +", - "[BinaryExpr ParenExpr AssignStmt BlockStmt FuncDecl File],false"}, - {"z := (x", - "[AssignStmt BlockStmt FuncDecl File],false"}, - {"func f", - "[FuncDecl File],false"}, - {"func f()", - "[FuncDecl File],false"}, - {" f()", - "[FuncDecl File],false"}, - {"() {}", - "[FuncDecl File],false"}, - {"// Hello", - "[File],false"}, - {" f", - "[Ident FuncDecl File],true"}, - {"func ", - "[FuncDecl File],true"}, - {"mai", - "[Ident File],true"}, - {"f() // NB", - "[CallExpr ExprStmt BlockStmt FuncDecl File],true"}, - } - for _, test := range tests { - f, start, end := findInterval(t, new(token.FileSet), input, test.substr) - if f == nil { - continue - } - - path, exact := astutil.PathEnclosingInterval(f, start, end) - if got := fmt.Sprintf("%s,%v", pathToString(path), exact); got != test.path { - t.Errorf("PathEnclosingInterval(%q): got %q, want %q", - test.substr, got, test.path) - continue - } - } -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports.go b/vendor/golang.org/x/tools/go/ast/astutil/imports.go deleted file mode 100644 index 83f196cd5..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports.go +++ /dev/null @@ -1,470 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package astutil contains common utilities for working with the Go AST. -package astutil // import "golang.org/x/tools/go/ast/astutil" - -import ( - "fmt" - "go/ast" - "go/token" - "strconv" - "strings" -) - -// AddImport adds the import path to the file f, if absent. 
-func AddImport(fset *token.FileSet, f *ast.File, ipath string) (added bool) { - return AddNamedImport(fset, f, "", ipath) -} - -// AddNamedImport adds the import path to the file f, if absent. -// If name is not empty, it is used to rename the import. -// -// For example, calling -// AddNamedImport(fset, f, "pathpkg", "path") -// adds -// import pathpkg "path" -func AddNamedImport(fset *token.FileSet, f *ast.File, name, ipath string) (added bool) { - if imports(f, ipath) { - return false - } - - newImport := &ast.ImportSpec{ - Path: &ast.BasicLit{ - Kind: token.STRING, - Value: strconv.Quote(ipath), - }, - } - if name != "" { - newImport.Name = &ast.Ident{Name: name} - } - - // Find an import decl to add to. - // The goal is to find an existing import - // whose import path has the longest shared - // prefix with ipath. - var ( - bestMatch = -1 // length of longest shared prefix - lastImport = -1 // index in f.Decls of the file's final import decl - impDecl *ast.GenDecl // import decl containing the best match - impIndex = -1 // spec index in impDecl containing the best match - - isThirdPartyPath = isThirdParty(ipath) - ) - for i, decl := range f.Decls { - gen, ok := decl.(*ast.GenDecl) - if ok && gen.Tok == token.IMPORT { - lastImport = i - // Do not add to import "C", to avoid disrupting the - // association with its doc comment, breaking cgo. - if declImports(gen, "C") { - continue - } - - // Match an empty import decl if that's all that is available. - if len(gen.Specs) == 0 && bestMatch == -1 { - impDecl = gen - } - - // Compute longest shared prefix with imports in this group and find best - // matched import spec. - // 1. Always prefer import spec with longest shared prefix. - // 2. While match length is 0, - // - for stdlib package: prefer first import spec. - // - for third party package: prefer first third party import spec. - // We cannot use last import spec as best match for third party package - // because grouped imports are usually placed last by goimports -local - // flag. - // See issue #19190. - seenAnyThirdParty := false - for j, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - p := importPath(impspec) - n := matchLen(p, ipath) - if n > bestMatch || (bestMatch == 0 && !seenAnyThirdParty && isThirdPartyPath) { - bestMatch = n - impDecl = gen - impIndex = j - } - seenAnyThirdParty = seenAnyThirdParty || isThirdParty(p) - } - } - } - - // If no import decl found, add one after the last import. - if impDecl == nil { - impDecl = &ast.GenDecl{ - Tok: token.IMPORT, - } - if lastImport >= 0 { - impDecl.TokPos = f.Decls[lastImport].End() - } else { - // There are no existing imports. - // Our new import goes after the package declaration and after - // the comment, if any, that starts on the same line as the - // package declaration. - impDecl.TokPos = f.Package - - file := fset.File(f.Package) - pkgLine := file.Line(f.Package) - for _, c := range f.Comments { - if file.Line(c.Pos()) > pkgLine { - break - } - impDecl.TokPos = c.End() - } - } - f.Decls = append(f.Decls, nil) - copy(f.Decls[lastImport+2:], f.Decls[lastImport+1:]) - f.Decls[lastImport+1] = impDecl - } - - // Insert new import at insertAt. 
- insertAt := 0 - if impIndex >= 0 { - // insert after the found import - insertAt = impIndex + 1 - } - impDecl.Specs = append(impDecl.Specs, nil) - copy(impDecl.Specs[insertAt+1:], impDecl.Specs[insertAt:]) - impDecl.Specs[insertAt] = newImport - pos := impDecl.Pos() - if insertAt > 0 { - // If there is a comment after an existing import, preserve the comment - // position by adding the new import after the comment. - if spec, ok := impDecl.Specs[insertAt-1].(*ast.ImportSpec); ok && spec.Comment != nil { - pos = spec.Comment.End() - } else { - // Assign same position as the previous import, - // so that the sorter sees it as being in the same block. - pos = impDecl.Specs[insertAt-1].Pos() - } - } - if newImport.Name != nil { - newImport.Name.NamePos = pos - } - newImport.Path.ValuePos = pos - newImport.EndPos = pos - - // Clean up parens. impDecl contains at least one spec. - if len(impDecl.Specs) == 1 { - // Remove unneeded parens. - impDecl.Lparen = token.NoPos - } else if !impDecl.Lparen.IsValid() { - // impDecl needs parens added. - impDecl.Lparen = impDecl.Specs[0].Pos() - } - - f.Imports = append(f.Imports, newImport) - - if len(f.Decls) <= 1 { - return true - } - - // Merge all the import declarations into the first one. - var first *ast.GenDecl - for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT || declImports(gen, "C") { - continue - } - if first == nil { - first = gen - continue // Don't touch the first one. - } - // We now know there is more than one package in this import - // declaration. Ensure that it ends up parenthesized. - first.Lparen = first.Pos() - // Move the imports of the other import declaration to the first one. - for _, spec := range gen.Specs { - spec.(*ast.ImportSpec).Path.ValuePos = first.Pos() - first.Specs = append(first.Specs, spec) - } - f.Decls = append(f.Decls[:i], f.Decls[i+1:]...) - i-- - } - - return true -} - -func isThirdParty(importPath string) bool { - // Third party package import path usually contains "." (".com", ".org", ...) - // This logic is taken from golang.org/x/tools/imports package. - return strings.Contains(importPath, ".") -} - -// DeleteImport deletes the import path from the file f, if present. -func DeleteImport(fset *token.FileSet, f *ast.File, path string) (deleted bool) { - return DeleteNamedImport(fset, f, "", path) -} - -// DeleteNamedImport deletes the import with the given name and path from the file f, if present. -func DeleteNamedImport(fset *token.FileSet, f *ast.File, name, path string) (deleted bool) { - var delspecs []*ast.ImportSpec - var delcomments []*ast.CommentGroup - - // Find the import nodes that import path, if any. - for i := 0; i < len(f.Decls); i++ { - decl := f.Decls[i] - gen, ok := decl.(*ast.GenDecl) - if !ok || gen.Tok != token.IMPORT { - continue - } - for j := 0; j < len(gen.Specs); j++ { - spec := gen.Specs[j] - impspec := spec.(*ast.ImportSpec) - if impspec.Name == nil && name != "" { - continue - } - if impspec.Name != nil && impspec.Name.Name != name { - continue - } - if importPath(impspec) != path { - continue - } - - // We found an import spec that imports path. - // Delete it. - delspecs = append(delspecs, impspec) - deleted = true - copy(gen.Specs[j:], gen.Specs[j+1:]) - gen.Specs = gen.Specs[:len(gen.Specs)-1] - - // If this was the last import spec in this decl, - // delete the decl, too. 
- if len(gen.Specs) == 0 { - copy(f.Decls[i:], f.Decls[i+1:]) - f.Decls = f.Decls[:len(f.Decls)-1] - i-- - break - } else if len(gen.Specs) == 1 { - if impspec.Doc != nil { - delcomments = append(delcomments, impspec.Doc) - } - if impspec.Comment != nil { - delcomments = append(delcomments, impspec.Comment) - } - for _, cg := range f.Comments { - // Found comment on the same line as the import spec. - if cg.End() < impspec.Pos() && fset.Position(cg.End()).Line == fset.Position(impspec.Pos()).Line { - delcomments = append(delcomments, cg) - break - } - } - - spec := gen.Specs[0].(*ast.ImportSpec) - - // Move the documentation right after the import decl. - if spec.Doc != nil { - for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Doc.Pos()).Line { - fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) - } - } - for _, cg := range f.Comments { - if cg.End() < spec.Pos() && fset.Position(cg.End()).Line == fset.Position(spec.Pos()).Line { - for fset.Position(gen.TokPos).Line+1 < fset.Position(spec.Pos()).Line { - fset.File(gen.TokPos).MergeLine(fset.Position(gen.TokPos).Line) - } - break - } - } - } - if j > 0 { - lastImpspec := gen.Specs[j-1].(*ast.ImportSpec) - lastLine := fset.Position(lastImpspec.Path.ValuePos).Line - line := fset.Position(impspec.Path.ValuePos).Line - - // We deleted an entry but now there may be - // a blank line-sized hole where the import was. - if line-lastLine > 1 { - // There was a blank line immediately preceding the deleted import, - // so there's no need to close the hole. - // Do nothing. - } else if line != fset.File(gen.Rparen).LineCount() { - // There was no blank line. Close the hole. - fset.File(gen.Rparen).MergeLine(line) - } - } - j-- - } - } - - // Delete imports from f.Imports. - for i := 0; i < len(f.Imports); i++ { - imp := f.Imports[i] - for j, del := range delspecs { - if imp == del { - copy(f.Imports[i:], f.Imports[i+1:]) - f.Imports = f.Imports[:len(f.Imports)-1] - copy(delspecs[j:], delspecs[j+1:]) - delspecs = delspecs[:len(delspecs)-1] - i-- - break - } - } - } - - // Delete comments from f.Comments. - for i := 0; i < len(f.Comments); i++ { - cg := f.Comments[i] - for j, del := range delcomments { - if cg == del { - copy(f.Comments[i:], f.Comments[i+1:]) - f.Comments = f.Comments[:len(f.Comments)-1] - copy(delcomments[j:], delcomments[j+1:]) - delcomments = delcomments[:len(delcomments)-1] - i-- - break - } - } - } - - if len(delspecs) > 0 { - panic(fmt.Sprintf("deleted specs from Decls but not Imports: %v", delspecs)) - } - - return -} - -// RewriteImport rewrites any import of path oldPath to path newPath. -func RewriteImport(fset *token.FileSet, f *ast.File, oldPath, newPath string) (rewrote bool) { - for _, imp := range f.Imports { - if importPath(imp) == oldPath { - rewrote = true - // record old End, because the default is to compute - // it using the length of imp.Path.Value. - imp.EndPos = imp.End() - imp.Path.Value = strconv.Quote(newPath) - } - } - return -} - -// UsesImport reports whether a given import is used. -func UsesImport(f *ast.File, path string) (used bool) { - spec := importSpec(f, path) - if spec == nil { - return - } - - name := spec.Name.String() - switch name { - case "": - // If the package name is not explicitly specified, - // make an educated guess. This is not guaranteed to be correct. - lastSlash := strings.LastIndex(path, "/") - if lastSlash == -1 { - name = path - } else { - name = path[lastSlash+1:] - } - case "_", ".": - // Not sure if this import is used - err on the side of caution. 
- return true - } - - ast.Walk(visitFn(func(n ast.Node) { - sel, ok := n.(*ast.SelectorExpr) - if ok && isTopName(sel.X, name) { - used = true - } - }), f) - - return -} - -type visitFn func(node ast.Node) - -func (fn visitFn) Visit(node ast.Node) ast.Visitor { - fn(node) - return fn -} - -// imports returns true if f imports path. -func imports(f *ast.File, path string) bool { - return importSpec(f, path) != nil -} - -// importSpec returns the import spec if f imports path, -// or nil otherwise. -func importSpec(f *ast.File, path string) *ast.ImportSpec { - for _, s := range f.Imports { - if importPath(s) == path { - return s - } - } - return nil -} - -// importPath returns the unquoted import path of s, -// or "" if the path is not properly quoted. -func importPath(s *ast.ImportSpec) string { - t, err := strconv.Unquote(s.Path.Value) - if err == nil { - return t - } - return "" -} - -// declImports reports whether gen contains an import of path. -func declImports(gen *ast.GenDecl, path string) bool { - if gen.Tok != token.IMPORT { - return false - } - for _, spec := range gen.Specs { - impspec := spec.(*ast.ImportSpec) - if importPath(impspec) == path { - return true - } - } - return false -} - -// matchLen returns the length of the longest path segment prefix shared by x and y. -func matchLen(x, y string) int { - n := 0 - for i := 0; i < len(x) && i < len(y) && x[i] == y[i]; i++ { - if x[i] == '/' { - n++ - } - } - return n -} - -// isTopName returns true if n is a top-level unresolved identifier with the given name. -func isTopName(n ast.Expr, name string) bool { - id, ok := n.(*ast.Ident) - return ok && id.Name == name && id.Obj == nil -} - -// Imports returns the file imports grouped by paragraph. -func Imports(fset *token.FileSet, f *ast.File) [][]*ast.ImportSpec { - var groups [][]*ast.ImportSpec - - for _, decl := range f.Decls { - genDecl, ok := decl.(*ast.GenDecl) - if !ok || genDecl.Tok != token.IMPORT { - break - } - - group := []*ast.ImportSpec{} - - var lastLine int - for _, spec := range genDecl.Specs { - importSpec := spec.(*ast.ImportSpec) - pos := importSpec.Path.ValuePos - line := fset.Position(pos).Line - if lastLine > 0 && pos > 0 && line-lastLine > 1 { - groups = append(groups, group) - group = []*ast.ImportSpec{} - } - group = append(group, importSpec) - lastLine = line - } - groups = append(groups, group) - } - - return groups -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go b/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go deleted file mode 100644 index 8bc348087..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/imports_test.go +++ /dev/null @@ -1,1818 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package astutil - -import ( - "bytes" - "go/ast" - "go/format" - "go/parser" - "go/token" - "reflect" - "strconv" - "testing" -) - -var fset = token.NewFileSet() - -func parse(t *testing.T, name, in string) *ast.File { - file, err := parser.ParseFile(fset, name, in, parser.ParseComments) - if err != nil { - t.Fatalf("%s parse: %v", name, err) - } - return file -} - -func print(t *testing.T, name string, f *ast.File) string { - var buf bytes.Buffer - if err := format.Node(&buf, fset, f); err != nil { - t.Fatalf("%s gofmt: %v", name, err) - } - return string(buf.Bytes()) -} - -type test struct { - name string - renamedPkg string - pkg string - in string - out string - broken bool // known broken -} - -var addTests = []test{ - { - name: "leave os alone", - pkg: "os", - in: `package main - -import ( - "os" -) -`, - out: `package main - -import ( - "os" -) -`, - }, - { - name: "import.1", - pkg: "os", - in: `package main -`, - out: `package main - -import "os" -`, - }, - { - name: "import.2", - pkg: "os", - in: `package main - -// Comment -import "C" -`, - out: `package main - -// Comment -import "C" -import "os" -`, - }, - { - name: "import.3", - pkg: "os", - in: `package main - -// Comment -import "C" - -import ( - "io" - "utf8" -) -`, - out: `package main - -// Comment -import "C" - -import ( - "io" - "os" - "utf8" -) -`, - }, - { - name: "import.17", - pkg: "x/y/z", - in: `package main - -// Comment -import "C" - -import ( - "a" - "b" - - "x/w" - - "d/f" -) -`, - out: `package main - -// Comment -import "C" - -import ( - "a" - "b" - - "x/w" - "x/y/z" - - "d/f" -) -`, - }, - { - name: "issue #19190", - pkg: "x.org/y/z", - in: `package main - -// Comment -import "C" - -import ( - "bytes" - "os" - - "d.com/f" -) -`, - out: `package main - -// Comment -import "C" - -import ( - "bytes" - "os" - - "d.com/f" - "x.org/y/z" -) -`, - }, - { - name: "issue #19190 with existing grouped import packages", - pkg: "x.org/y/z", - in: `package main - -// Comment -import "C" - -import ( - "bytes" - "os" - - "c.com/f" - "d.com/f" - - "y.com/a" - "y.com/b" - "y.com/c" -) -`, - out: `package main - -// Comment -import "C" - -import ( - "bytes" - "os" - - "c.com/f" - "d.com/f" - "x.org/y/z" - - "y.com/a" - "y.com/b" - "y.com/c" -) -`, - }, - { - name: "issue #19190 - match score is still respected", - pkg: "y.org/c", - in: `package main - -import ( - "x.org/a" - - "y.org/b" -) -`, - out: `package main - -import ( - "x.org/a" - - "y.org/b" - "y.org/c" -) -`, - }, - { - name: "import into singular group", - pkg: "bytes", - in: `package main - -import "os" - -`, - out: `package main - -import ( - "bytes" - "os" -) -`, - }, - { - name: "import into singular group with comment", - pkg: "bytes", - in: `package main - -import /* why */ /* comment here? */ "os" - -`, - out: `package main - -import /* why */ /* comment here? */ ( - "bytes" - "os" -) -`, - }, - { - name: "import into group with leading comment", - pkg: "strings", - in: `package main - -import ( - // comment before bytes - "bytes" - "os" -) - -`, - out: `package main - -import ( - // comment before bytes - "bytes" - "os" - "strings" -) -`, - }, - { - name: "", - renamedPkg: "fmtpkg", - pkg: "fmt", - in: `package main - -import "os" - -`, - out: `package main - -import ( - fmtpkg "fmt" - "os" -) -`, - }, - { - name: "struct comment", - pkg: "time", - in: `package main - -// This is a comment before a struct. -type T struct { - t time.Time -} -`, - out: `package main - -import "time" - -// This is a comment before a struct. 
-type T struct { - t time.Time -} -`, - }, - { - name: "issue 8729 import C", - pkg: "time", - in: `package main - -import "C" - -// comment -type T time.Time -`, - out: `package main - -import "C" -import "time" - -// comment -type T time.Time -`, - }, - { - name: "issue 8729 empty import", - pkg: "time", - in: `package main - -import () - -// comment -type T time.Time -`, - out: `package main - -import "time" - -// comment -type T time.Time -`, - }, - { - name: "issue 8729 comment on package line", - pkg: "time", - in: `package main // comment - -type T time.Time -`, - out: `package main // comment -import "time" - -type T time.Time -`, - }, - { - name: "issue 8729 comment after package", - pkg: "time", - in: `package main -// comment - -type T time.Time -`, - out: `package main - -import "time" - -// comment - -type T time.Time -`, - }, - { - name: "issue 8729 comment before and on package line", - pkg: "time", - in: `// comment before -package main // comment on - -type T time.Time -`, - out: `// comment before -package main // comment on -import "time" - -type T time.Time -`, - }, - - // Issue 9961: Match prefixes using path segments rather than bytes - { - name: "issue 9961", - pkg: "regexp", - in: `package main - -import ( - "flag" - "testing" - - "rsc.io/p" -) -`, - out: `package main - -import ( - "flag" - "regexp" - "testing" - - "rsc.io/p" -) -`, - }, - // Issue 10337: Preserve comment position - { - name: "issue 10337", - pkg: "fmt", - in: `package main - -import ( - "bytes" // a - "log" // c -) -`, - out: `package main - -import ( - "bytes" // a - "fmt" - "log" // c -) -`, - }, - { - name: "issue 10337 new import at the start", - pkg: "bytes", - in: `package main - -import ( - "fmt" // b - "log" // c -) -`, - out: `package main - -import ( - "bytes" - "fmt" // b - "log" // c -) -`, - }, - { - name: "issue 10337 new import at the end", - pkg: "log", - in: `package main - -import ( - "bytes" // a - "fmt" // b -) -`, - out: `package main - -import ( - "bytes" // a - "fmt" // b - "log" -) -`, - }, - // Issue 14075: Merge import declarations - { - name: "issue 14075", - pkg: "bufio", - in: `package main - -import "bytes" -import "fmt" -`, - out: `package main - -import ( - "bufio" - "bytes" - "fmt" -) -`, - }, - { - name: "issue 14075 update position", - pkg: "bufio", - in: `package main - -import "bytes" -import ( - "fmt" -) -`, - out: `package main - -import ( - "bufio" - "bytes" - "fmt" -) -`, - }, - { - name: `issue 14075 ignore import "C"`, - pkg: "bufio", - in: `package main - -// Comment -import "C" - -import "bytes" -import "fmt" -`, - out: `package main - -// Comment -import "C" - -import ( - "bufio" - "bytes" - "fmt" -) -`, - }, - { - name: `issue 14075 ignore adjacent import "C"`, - pkg: "bufio", - in: `package main - -// Comment -import "C" -import "fmt" -`, - out: `package main - -// Comment -import "C" -import ( - "bufio" - "fmt" -) -`, - }, - { - name: `issue 14075 ignore adjacent import "C" (without factored import)`, - pkg: "bufio", - in: `package main - -// Comment -import "C" -import "fmt" -`, - out: `package main - -// Comment -import "C" -import ( - "bufio" - "fmt" -) -`, - }, - { - name: `issue 14075 ignore single import "C"`, - pkg: "bufio", - in: `package main - -// Comment -import "C" -`, - out: `package main - -// Comment -import "C" -import "bufio" -`, - }, - { - name: `issue 17212 several single-import lines with shared prefix ending in a slash`, - pkg: "net/http", - in: `package main - -import "bufio" -import "net/url" -`, - out: `package main - -import 
( - "bufio" - "net/http" - "net/url" -) -`, - }, - { - name: `issue 17212 block imports lines with shared prefix ending in a slash`, - pkg: "net/http", - in: `package main - -import ( - "bufio" - "net/url" -) -`, - out: `package main - -import ( - "bufio" - "net/http" - "net/url" -) -`, - }, - { - name: `issue 17213 many single-import lines`, - pkg: "fmt", - in: `package main - -import "bufio" -import "bytes" -import "errors" -`, - out: `package main - -import ( - "bufio" - "bytes" - "errors" - "fmt" -) -`, - }, -} - -func TestAddImport(t *testing.T) { - for _, test := range addTests { - file := parse(t, test.name, test.in) - var before bytes.Buffer - ast.Fprint(&before, fset, file, nil) - AddNamedImport(fset, file, test.renamedPkg, test.pkg) - if got := print(t, test.name, file); got != test.out { - if test.broken { - t.Logf("%s is known broken:\ngot: %s\nwant: %s", test.name, got, test.out) - } else { - t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out) - } - var after bytes.Buffer - ast.Fprint(&after, fset, file, nil) - - t.Logf("AST before:\n%s\nAST after:\n%s\n", before.String(), after.String()) - } - } -} - -func TestDoubleAddImport(t *testing.T) { - file := parse(t, "doubleimport", "package main\n") - AddImport(fset, file, "os") - AddImport(fset, file, "bytes") - want := `package main - -import ( - "bytes" - "os" -) -` - if got := print(t, "doubleimport", file); got != want { - t.Errorf("got: %s\nwant: %s", got, want) - } -} - -func TestDoubleAddNamedImport(t *testing.T) { - file := parse(t, "doublenamedimport", "package main\n") - AddNamedImport(fset, file, "o", "os") - AddNamedImport(fset, file, "i", "io") - want := `package main - -import ( - i "io" - o "os" -) -` - if got := print(t, "doublenamedimport", file); got != want { - t.Errorf("got: %s\nwant: %s", got, want) - } -} - -// Part of issue 8729. -func TestDoubleAddImportWithDeclComment(t *testing.T) { - file := parse(t, "doubleimport", `package main - -import ( -) - -// comment -type I int -`) - // The AddImport order here matters. 
- AddImport(fset, file, "golang.org/x/tools/go/ast/astutil") - AddImport(fset, file, "os") - want := `package main - -import ( - "golang.org/x/tools/go/ast/astutil" - "os" -) - -// comment -type I int -` - if got := print(t, "doubleimport_with_decl_comment", file); got != want { - t.Errorf("got: %s\nwant: %s", got, want) - } -} - -var deleteTests = []test{ - { - name: "import.4", - pkg: "os", - in: `package main - -import ( - "os" -) -`, - out: `package main -`, - }, - { - name: "import.5", - pkg: "os", - in: `package main - -// Comment -import "C" -import "os" -`, - out: `package main - -// Comment -import "C" -`, - }, - { - name: "import.6", - pkg: "os", - in: `package main - -// Comment -import "C" - -import ( - "io" - "os" - "utf8" -) -`, - out: `package main - -// Comment -import "C" - -import ( - "io" - "utf8" -) -`, - }, - { - name: "import.7", - pkg: "io", - in: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - out: `package main - -import ( - // a - "os" // b - "utf8" // c -) -`, - }, - { - name: "import.8", - pkg: "os", - in: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - out: `package main - -import ( - "io" // a - // b - "utf8" // c -) -`, - }, - { - name: "import.9", - pkg: "utf8", - in: `package main - -import ( - "io" // a - "os" // b - "utf8" // c -) -`, - out: `package main - -import ( - "io" // a - "os" // b - // c -) -`, - }, - { - name: "import.10", - pkg: "io", - in: `package main - -import ( - "io" - "os" - "utf8" -) -`, - out: `package main - -import ( - "os" - "utf8" -) -`, - }, - { - name: "import.11", - pkg: "os", - in: `package main - -import ( - "io" - "os" - "utf8" -) -`, - out: `package main - -import ( - "io" - "utf8" -) -`, - }, - { - name: "import.12", - pkg: "utf8", - in: `package main - -import ( - "io" - "os" - "utf8" -) -`, - out: `package main - -import ( - "io" - "os" -) -`, - }, - { - name: "handle.raw.quote.imports", - pkg: "os", - in: "package main\n\nimport `os`", - out: `package main -`, - }, - { - name: "import.13", - pkg: "io", - in: `package main - -import ( - "fmt" - - "io" - "os" - "utf8" - - "go/format" -) -`, - out: `package main - -import ( - "fmt" - - "os" - "utf8" - - "go/format" -) -`, - }, - { - name: "import.14", - pkg: "io", - in: `package main - -import ( - "fmt" // a - - "io" // b - "os" // c - "utf8" // d - - "go/format" // e -) -`, - out: `package main - -import ( - "fmt" // a - - // b - "os" // c - "utf8" // d - - "go/format" // e -) -`, - }, - { - name: "import.15", - pkg: "double", - in: `package main - -import ( - "double" - "double" -) -`, - out: `package main -`, - }, - { - name: "import.16", - pkg: "bubble", - in: `package main - -import ( - "toil" - "bubble" - "bubble" - "trouble" -) -`, - out: `package main - -import ( - "toil" - "trouble" -) -`, - }, - { - name: "import.17", - pkg: "quad", - in: `package main - -import ( - "quad" - "quad" -) - -import ( - "quad" - "quad" -) -`, - out: `package main -`, - }, - { - name: "import.18", - renamedPkg: "x", - pkg: "fmt", - in: `package main - -import ( - "fmt" - x "fmt" -) -`, - out: `package main - -import ( - "fmt" -) -`, - }, - { - name: "import.18", - renamedPkg: "x", - pkg: "fmt", - in: `package main - -import x "fmt" -import y "fmt" -`, - out: `package main - -import y "fmt" -`, - }, - // Issue #15432, #18051 - { - name: "import.19", - pkg: "fmt", - in: `package main - -import ( - "fmt" - - // Some comment. - "io" -)`, - out: `package main - -import ( - // Some comment. 
- "io" -) -`, - }, - { - name: "import.20", - pkg: "fmt", - in: `package main - -import ( - "fmt" - - // Some - // comment. - "io" -)`, - out: `package main - -import ( - // Some - // comment. - "io" -) -`, - }, - { - name: "import.21", - pkg: "fmt", - in: `package main - -import ( - "fmt" - - /* - Some - comment. - */ - "io" -)`, - out: `package main - -import ( - /* - Some - comment. - */ - "io" -) -`, - }, - { - name: "import.22", - pkg: "fmt", - in: `package main - -import ( - /* Some */ - // comment. - "io" - "fmt" -)`, - out: `package main - -import ( - /* Some */ - // comment. - "io" -) -`, - }, - { - name: "import.23", - pkg: "fmt", - in: `package main - -import ( - // comment 1 - "fmt" - // comment 2 - "io" -)`, - out: `package main - -import ( - // comment 2 - "io" -) -`, - }, - { - name: "import.24", - pkg: "fmt", - in: `package main - -import ( - "fmt" // comment 1 - "io" // comment 2 -)`, - out: `package main - -import ( - "io" // comment 2 -) -`, - }, - { - name: "import.25", - pkg: "fmt", - in: `package main - -import ( - "fmt" - /* comment */ "io" -)`, - out: `package main - -import ( - /* comment */ "io" -) -`, - }, - { - name: "import.26", - pkg: "fmt", - in: `package main - -import ( - "fmt" - "io" /* comment */ -)`, - out: `package main - -import ( - "io" /* comment */ -) -`, - }, - { - name: "import.27", - pkg: "fmt", - in: `package main - -import ( - "fmt" /* comment */ - "io" -)`, - out: `package main - -import ( - "io" -) -`, - }, - { - name: "import.28", - pkg: "fmt", - in: `package main - -import ( - /* comment */ "fmt" - "io" -)`, - out: `package main - -import ( - "io" -) -`, - }, - { - name: "import.29", - pkg: "fmt", - in: `package main - -// comment 1 -import ( - "fmt" - "io" // comment 2 -)`, - out: `package main - -// comment 1 -import ( - "io" // comment 2 -) -`, - }, - { - name: "import.30", - pkg: "fmt", - in: `package main - -// comment 1 -import ( - "fmt" // comment 2 - "io" -)`, - out: `package main - -// comment 1 -import ( - "io" -) -`, - }, - { - name: "import.31", - pkg: "fmt", - in: `package main - -// comment 1 -import ( - "fmt" - /* comment 2 */ "io" -)`, - out: `package main - -// comment 1 -import ( - /* comment 2 */ "io" -) -`, - }, - { - name: "import.32", - pkg: "fmt", - renamedPkg: "f", - in: `package main - -// comment 1 -import ( - f "fmt" - /* comment 2 */ i "io" -)`, - out: `package main - -// comment 1 -import ( - /* comment 2 */ i "io" -) -`, - }, - { - name: "import.33", - pkg: "fmt", - renamedPkg: "f", - in: `package main - -// comment 1 -import ( - /* comment 2 */ f "fmt" - i "io" -)`, - out: `package main - -// comment 1 -import ( - i "io" -) -`, - }, - { - name: "import.34", - pkg: "fmt", - renamedPkg: "f", - in: `package main - -// comment 1 -import ( - f "fmt" /* comment 2 */ - i "io" -)`, - out: `package main - -// comment 1 -import ( - i "io" -) -`, - }, - { - name: "import.35", - pkg: "fmt", - in: `package main - -// comment 1 -import ( - "fmt" - // comment 2 - "io" -)`, - out: `package main - -// comment 1 -import ( - // comment 2 - "io" -) -`, - }, - { - name: "import.36", - pkg: "fmt", - in: `package main - -/* comment 1 */ -import ( - "fmt" - /* comment 2 */ - "io" -)`, - out: `package main - -/* comment 1 */ -import ( - /* comment 2 */ - "io" -) -`, - }, - - // Issue 20229: MergeLine panic on weird input - { - name: "import.37", - pkg: "io", - in: `package main -import("_" -"io")`, - out: `package main - -import ( - "_" -) -`, - }, -} - -func TestDeleteImport(t *testing.T) { - for _, test := range deleteTests { - file 
:= parse(t, test.name, test.in) - DeleteNamedImport(fset, file, test.renamedPkg, test.pkg) - if got := print(t, test.name, file); got != test.out { - t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out) - } - } -} - -type rewriteTest struct { - name string - srcPkg string - dstPkg string - in string - out string -} - -var rewriteTests = []rewriteTest{ - { - name: "import.13", - srcPkg: "utf8", - dstPkg: "encoding/utf8", - in: `package main - -import ( - "io" - "os" - "utf8" // thanks ken -) -`, - out: `package main - -import ( - "encoding/utf8" // thanks ken - "io" - "os" -) -`, - }, - { - name: "import.14", - srcPkg: "asn1", - dstPkg: "encoding/asn1", - in: `package main - -import ( - "asn1" - "crypto" - "crypto/rsa" - _ "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "time" -) - -var x = 1 -`, - out: `package main - -import ( - "crypto" - "crypto/rsa" - _ "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "time" -) - -var x = 1 -`, - }, - { - name: "import.15", - srcPkg: "url", - dstPkg: "net/url", - in: `package main - -import ( - "bufio" - "net" - "path" - "url" -) - -var x = 1 // comment on x, not on url -`, - out: `package main - -import ( - "bufio" - "net" - "net/url" - "path" -) - -var x = 1 // comment on x, not on url -`, - }, - { - name: "import.16", - srcPkg: "http", - dstPkg: "net/http", - in: `package main - -import ( - "flag" - "http" - "log" - "text/template" -) - -var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 -`, - out: `package main - -import ( - "flag" - "log" - "net/http" - "text/template" -) - -var addr = flag.String("addr", ":1718", "http service address") // Q=17, R=18 -`, - }, -} - -func TestRewriteImport(t *testing.T) { - for _, test := range rewriteTests { - file := parse(t, test.name, test.in) - RewriteImport(fset, file, test.srcPkg, test.dstPkg) - if got := print(t, test.name, file); got != test.out { - t.Errorf("%s:\ngot: %s\nwant: %s", test.name, got, test.out) - } - } -} - -var importsTests = []struct { - name string - in string - want [][]string -}{ - { - name: "no packages", - in: `package foo -`, - want: nil, - }, - { - name: "one group", - in: `package foo - -import ( - "fmt" - "testing" -) -`, - want: [][]string{{"fmt", "testing"}}, - }, - { - name: "four groups", - in: `package foo - -import "C" -import ( - "fmt" - "testing" - - "appengine" - - "myproject/mylib1" - "myproject/mylib2" -) -`, - want: [][]string{ - {"C"}, - {"fmt", "testing"}, - {"appengine"}, - {"myproject/mylib1", "myproject/mylib2"}, - }, - }, - { - name: "multiple factored groups", - in: `package foo - -import ( - "fmt" - "testing" - - "appengine" -) -import ( - "reflect" - - "bytes" -) -`, - want: [][]string{ - {"fmt", "testing"}, - {"appengine"}, - {"reflect"}, - {"bytes"}, - }, - }, -} - -func unquote(s string) string { - res, err := strconv.Unquote(s) - if err != nil { - return "could_not_unquote" - } - return res -} - -func TestImports(t *testing.T) { - fset := token.NewFileSet() - for _, test := range importsTests { - f, err := parser.ParseFile(fset, "test.go", test.in, 0) - if err != nil { - t.Errorf("%s: %v", test.name, err) - continue - } - var got [][]string - for _, group := range Imports(fset, f) { - var b []string - for _, spec := range group { - b = append(b, unquote(spec.Path.Value)) - } - got = append(got, b) - } - if !reflect.DeepEqual(got, test.want) { - t.Errorf("Imports(%s)=%v, want %v", test.name, got, test.want) - } - } -} - -var usesImportTests = []struct { - name string - path string - in string - 
want bool -}{ - { - name: "no packages", - path: "io", - in: `package foo -`, - want: false, - }, - { - name: "import.1", - path: "io", - in: `package foo - -import "io" - -var _ io.Writer -`, - want: true, - }, - { - name: "import.2", - path: "io", - in: `package foo - -import "io" -`, - want: false, - }, - { - name: "import.3", - path: "io", - in: `package foo - -import "io" - -var io = 42 -`, - want: false, - }, - { - name: "import.4", - path: "io", - in: `package foo - -import i "io" - -var _ i.Writer -`, - want: true, - }, - { - name: "import.5", - path: "io", - in: `package foo - -import i "io" -`, - want: false, - }, - { - name: "import.6", - path: "io", - in: `package foo - -import i "io" - -var i = 42 -var io = 42 -`, - want: false, - }, - { - name: "import.7", - path: "encoding/json", - in: `package foo - -import "encoding/json" - -var _ json.Encoder -`, - want: true, - }, - { - name: "import.8", - path: "encoding/json", - in: `package foo - -import "encoding/json" -`, - want: false, - }, - { - name: "import.9", - path: "encoding/json", - in: `package foo - -import "encoding/json" - -var json = 42 -`, - want: false, - }, - { - name: "import.10", - path: "encoding/json", - in: `package foo - -import j "encoding/json" - -var _ j.Encoder -`, - want: true, - }, - { - name: "import.11", - path: "encoding/json", - in: `package foo - -import j "encoding/json" -`, - want: false, - }, - { - name: "import.12", - path: "encoding/json", - in: `package foo - -import j "encoding/json" - -var j = 42 -var json = 42 -`, - want: false, - }, - { - name: "import.13", - path: "io", - in: `package foo - -import _ "io" -`, - want: true, - }, - { - name: "import.14", - path: "io", - in: `package foo - -import . "io" -`, - want: true, - }, -} - -func TestUsesImport(t *testing.T) { - fset := token.NewFileSet() - for _, test := range usesImportTests { - f, err := parser.ParseFile(fset, "test.go", test.in, 0) - if err != nil { - t.Errorf("%s: %v", test.name, err) - continue - } - got := UsesImport(f, test.path) - if got != test.want { - t.Errorf("UsesImport(%s)=%v, want %v", test.name, got, test.want) - } - } -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go deleted file mode 100644 index cf72ea990..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite.go +++ /dev/null @@ -1,477 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil - -import ( - "fmt" - "go/ast" - "reflect" - "sort" -) - -// An ApplyFunc is invoked by Apply for each node n, even if n is nil, -// before and/or after the node's children, using a Cursor describing -// the current node and providing operations on it. -// -// The return value of ApplyFunc controls the syntax tree traversal. -// See Apply for details. -type ApplyFunc func(*Cursor) bool - -// Apply traverses a syntax tree recursively, starting with root, -// and calling pre and post for each node as described below. -// Apply returns the syntax tree, possibly modified. -// -// If pre is not nil, it is called for each node before the node's -// children are traversed (pre-order). If pre returns false, no -// children are traversed, and post is not called for that node. -// -// If post is not nil, and a prior call of pre didn't return false, -// post is called for each node after its children are traversed -// (post-order). 
If post returns false, traversal is terminated and -// Apply returns immediately. -// -// Only fields that refer to AST nodes are considered children; -// i.e., token.Pos, Scopes, Objects, and fields of basic types -// (strings, etc.) are ignored. -// -// Children are traversed in the order in which they appear in the -// respective node's struct definition. A package's files are -// traversed in the filenames' alphabetical order. -// -func Apply(root ast.Node, pre, post ApplyFunc) (result ast.Node) { - parent := &struct{ ast.Node }{root} - defer func() { - if r := recover(); r != nil && r != abort { - panic(r) - } - result = parent.Node - }() - a := &application{pre: pre, post: post} - a.apply(parent, "Node", nil, root) - return -} - -var abort = new(int) // singleton, to signal termination of Apply - -// A Cursor describes a node encountered during Apply. -// Information about the node and its parent is available -// from the Node, Parent, Name, and Index methods. -// -// If p is a variable of type and value of the current parent node -// c.Parent(), and f is the field identifier with name c.Name(), -// the following invariants hold: -// -// p.f == c.Node() if c.Index() < 0 -// p.f[c.Index()] == c.Node() if c.Index() >= 0 -// -// The methods Replace, Delete, InsertBefore, and InsertAfter -// can be used to change the AST without disrupting Apply. -type Cursor struct { - parent ast.Node - name string - iter *iterator // valid if non-nil - node ast.Node -} - -// Node returns the current Node. -func (c *Cursor) Node() ast.Node { return c.node } - -// Parent returns the parent of the current Node. -func (c *Cursor) Parent() ast.Node { return c.parent } - -// Name returns the name of the parent Node field that contains the current Node. -// If the parent is a *ast.Package and the current Node is a *ast.File, Name returns -// the filename for the current Node. -func (c *Cursor) Name() string { return c.name } - -// Index reports the index >= 0 of the current Node in the slice of Nodes that -// contains it, or a value < 0 if the current Node is not part of a slice. -// The index of the current node changes if InsertBefore is called while -// processing the current node. -func (c *Cursor) Index() int { - if c.iter != nil { - return c.iter.index - } - return -1 -} - -// field returns the current node's parent field value. -func (c *Cursor) field() reflect.Value { - return reflect.Indirect(reflect.ValueOf(c.parent)).FieldByName(c.name) -} - -// Replace replaces the current Node with n. -// The replacement node is not walked by Apply. -func (c *Cursor) Replace(n ast.Node) { - if _, ok := c.node.(*ast.File); ok { - file, ok := n.(*ast.File) - if !ok { - panic("attempt to replace *ast.File with non-*ast.File") - } - c.parent.(*ast.Package).Files[c.name] = file - return - } - - v := c.field() - if i := c.Index(); i >= 0 { - v = v.Index(i) - } - v.Set(reflect.ValueOf(n)) -} - -// Delete deletes the current Node from its containing slice. -// If the current Node is not part of a slice, Delete panics. -// As a special case, if the current node is a package file, -// Delete removes it from the package's Files map. 
-func (c *Cursor) Delete() { - if _, ok := c.node.(*ast.File); ok { - delete(c.parent.(*ast.Package).Files, c.name) - return - } - - i := c.Index() - if i < 0 { - panic("Delete node not contained in slice") - } - v := c.field() - l := v.Len() - reflect.Copy(v.Slice(i, l), v.Slice(i+1, l)) - v.Index(l - 1).Set(reflect.Zero(v.Type().Elem())) - v.SetLen(l - 1) - c.iter.step-- -} - -// InsertAfter inserts n after the current Node in its containing slice. -// If the current Node is not part of a slice, InsertAfter panics. -// Apply does not walk n. -func (c *Cursor) InsertAfter(n ast.Node) { - i := c.Index() - if i < 0 { - panic("InsertAfter node not contained in slice") - } - v := c.field() - v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) - l := v.Len() - reflect.Copy(v.Slice(i+2, l), v.Slice(i+1, l)) - v.Index(i + 1).Set(reflect.ValueOf(n)) - c.iter.step++ -} - -// InsertBefore inserts n before the current Node in its containing slice. -// If the current Node is not part of a slice, InsertBefore panics. -// Apply will not walk n. -func (c *Cursor) InsertBefore(n ast.Node) { - i := c.Index() - if i < 0 { - panic("InsertBefore node not contained in slice") - } - v := c.field() - v.Set(reflect.Append(v, reflect.Zero(v.Type().Elem()))) - l := v.Len() - reflect.Copy(v.Slice(i+1, l), v.Slice(i, l)) - v.Index(i).Set(reflect.ValueOf(n)) - c.iter.index++ -} - -// application carries all the shared data so we can pass it around cheaply. -type application struct { - pre, post ApplyFunc - cursor Cursor - iter iterator -} - -func (a *application) apply(parent ast.Node, name string, iter *iterator, n ast.Node) { - // convert typed nil into untyped nil - if v := reflect.ValueOf(n); v.Kind() == reflect.Ptr && v.IsNil() { - n = nil - } - - // avoid heap-allocating a new cursor for each apply call; reuse a.cursor instead - saved := a.cursor - a.cursor.parent = parent - a.cursor.name = name - a.cursor.iter = iter - a.cursor.node = n - - if a.pre != nil && !a.pre(&a.cursor) { - a.cursor = saved - return - } - - // walk children - // (the order of the cases matches the order of the corresponding node types in go/ast) - switch n := n.(type) { - case nil: - // nothing to do - - // Comments and fields - case *ast.Comment: - // nothing to do - - case *ast.CommentGroup: - if n != nil { - a.applyList(n, "List") - } - - case *ast.Field: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Names") - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Tag", nil, n.Tag) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.FieldList: - a.applyList(n, "List") - - // Expressions - case *ast.BadExpr, *ast.Ident, *ast.BasicLit: - // nothing to do - - case *ast.Ellipsis: - a.apply(n, "Elt", nil, n.Elt) - - case *ast.FuncLit: - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Body", nil, n.Body) - - case *ast.CompositeLit: - a.apply(n, "Type", nil, n.Type) - a.applyList(n, "Elts") - - case *ast.ParenExpr: - a.apply(n, "X", nil, n.X) - - case *ast.SelectorExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Sel", nil, n.Sel) - - case *ast.IndexExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Index", nil, n.Index) - - case *ast.SliceExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Low", nil, n.Low) - a.apply(n, "High", nil, n.High) - a.apply(n, "Max", nil, n.Max) - - case *ast.TypeAssertExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Type", nil, n.Type) - - case *ast.CallExpr: - a.apply(n, "Fun", nil, n.Fun) - a.applyList(n, "Args") - - case *ast.StarExpr: - a.apply(n, "X", nil, n.X) - - case *ast.UnaryExpr: - a.apply(n, "X", nil, n.X) - - 
case *ast.BinaryExpr: - a.apply(n, "X", nil, n.X) - a.apply(n, "Y", nil, n.Y) - - case *ast.KeyValueExpr: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - - // Types - case *ast.ArrayType: - a.apply(n, "Len", nil, n.Len) - a.apply(n, "Elt", nil, n.Elt) - - case *ast.StructType: - a.apply(n, "Fields", nil, n.Fields) - - case *ast.FuncType: - a.apply(n, "Params", nil, n.Params) - a.apply(n, "Results", nil, n.Results) - - case *ast.InterfaceType: - a.apply(n, "Methods", nil, n.Methods) - - case *ast.MapType: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - - case *ast.ChanType: - a.apply(n, "Value", nil, n.Value) - - // Statements - case *ast.BadStmt: - // nothing to do - - case *ast.DeclStmt: - a.apply(n, "Decl", nil, n.Decl) - - case *ast.EmptyStmt: - // nothing to do - - case *ast.LabeledStmt: - a.apply(n, "Label", nil, n.Label) - a.apply(n, "Stmt", nil, n.Stmt) - - case *ast.ExprStmt: - a.apply(n, "X", nil, n.X) - - case *ast.SendStmt: - a.apply(n, "Chan", nil, n.Chan) - a.apply(n, "Value", nil, n.Value) - - case *ast.IncDecStmt: - a.apply(n, "X", nil, n.X) - - case *ast.AssignStmt: - a.applyList(n, "Lhs") - a.applyList(n, "Rhs") - - case *ast.GoStmt: - a.apply(n, "Call", nil, n.Call) - - case *ast.DeferStmt: - a.apply(n, "Call", nil, n.Call) - - case *ast.ReturnStmt: - a.applyList(n, "Results") - - case *ast.BranchStmt: - a.apply(n, "Label", nil, n.Label) - - case *ast.BlockStmt: - a.applyList(n, "List") - - case *ast.IfStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Cond", nil, n.Cond) - a.apply(n, "Body", nil, n.Body) - a.apply(n, "Else", nil, n.Else) - - case *ast.CaseClause: - a.applyList(n, "List") - a.applyList(n, "Body") - - case *ast.SwitchStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Tag", nil, n.Tag) - a.apply(n, "Body", nil, n.Body) - - case *ast.TypeSwitchStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Assign", nil, n.Assign) - a.apply(n, "Body", nil, n.Body) - - case *ast.CommClause: - a.apply(n, "Comm", nil, n.Comm) - a.applyList(n, "Body") - - case *ast.SelectStmt: - a.apply(n, "Body", nil, n.Body) - - case *ast.ForStmt: - a.apply(n, "Init", nil, n.Init) - a.apply(n, "Cond", nil, n.Cond) - a.apply(n, "Post", nil, n.Post) - a.apply(n, "Body", nil, n.Body) - - case *ast.RangeStmt: - a.apply(n, "Key", nil, n.Key) - a.apply(n, "Value", nil, n.Value) - a.apply(n, "X", nil, n.X) - a.apply(n, "Body", nil, n.Body) - - // Declarations - case *ast.ImportSpec: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Path", nil, n.Path) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.ValueSpec: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Names") - a.apply(n, "Type", nil, n.Type) - a.applyList(n, "Values") - a.apply(n, "Comment", nil, n.Comment) - - case *ast.TypeSpec: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Comment", nil, n.Comment) - - case *ast.BadDecl: - // nothing to do - - case *ast.GenDecl: - a.apply(n, "Doc", nil, n.Doc) - a.applyList(n, "Specs") - - case *ast.FuncDecl: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Recv", nil, n.Recv) - a.apply(n, "Name", nil, n.Name) - a.apply(n, "Type", nil, n.Type) - a.apply(n, "Body", nil, n.Body) - - // Files and packages - case *ast.File: - a.apply(n, "Doc", nil, n.Doc) - a.apply(n, "Name", nil, n.Name) - a.applyList(n, "Decls") - // Don't walk n.Comments; they have either been walked already if - // they are Doc comments, or they can be easily walked explicitly. 
- - case *ast.Package: - // collect and sort names for reproducible behavior - var names []string - for name := range n.Files { - names = append(names, name) - } - sort.Strings(names) - for _, name := range names { - a.apply(n, name, nil, n.Files[name]) - } - - default: - panic(fmt.Sprintf("Apply: unexpected node type %T", n)) - } - - if a.post != nil && !a.post(&a.cursor) { - panic(abort) - } - - a.cursor = saved -} - -// An iterator controls iteration over a slice of nodes. -type iterator struct { - index, step int -} - -func (a *application) applyList(parent ast.Node, name string) { - // avoid heap-allocating a new iterator for each applyList call; reuse a.iter instead - saved := a.iter - a.iter.index = 0 - for { - // must reload parent.name each time, since cursor modifications might change it - v := reflect.Indirect(reflect.ValueOf(parent)).FieldByName(name) - if a.iter.index >= v.Len() { - break - } - - // element x may be nil in a bad AST - be cautious - var x ast.Node - if e := v.Index(a.iter.index); e.IsValid() { - x = e.Interface().(ast.Node) - } - - a.iter.step = 1 - a.apply(parent, name, &a.iter, x) - a.iter.index += a.iter.step - } - a.iter = saved -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go b/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go deleted file mode 100644 index 1c86970ff..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/rewrite_test.go +++ /dev/null @@ -1,248 +0,0 @@ -// Copyright 2017 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package astutil_test - -import ( - "bytes" - "go/ast" - "go/format" - "go/parser" - "go/token" - "testing" - - "golang.org/x/tools/go/ast/astutil" -) - -var rewriteTests = [...]struct { - name string - orig, want string - pre, post astutil.ApplyFunc -}{ - {name: "nop", orig: "package p\n", want: "package p\n"}, - - {name: "replace", - orig: `package p - -var x int -`, - want: `package p - -var t T -`, - post: func(c *astutil.Cursor) bool { - if _, ok := c.Node().(*ast.ValueSpec); ok { - c.Replace(valspec("t", "T")) - return false - } - return true - }, - }, - - {name: "set doc strings", - orig: `package p - -const z = 0 - -type T struct{} - -var x int -`, - want: `package p -// a foo is a foo -const z = 0 -// a foo is a foo -type T struct{} -// a foo is a foo -var x int -`, - post: func(c *astutil.Cursor) bool { - if _, ok := c.Parent().(*ast.GenDecl); ok && c.Name() == "Doc" && c.Node() == nil { - c.Replace(&ast.CommentGroup{List: []*ast.Comment{{Text: "// a foo is a foo"}}}) - } - return true - }, - }, - - {name: "insert names", - orig: `package p - -const a = 1 -`, - want: `package p - -const a, b, c = 1, 2, 3 -`, - pre: func(c *astutil.Cursor) bool { - if _, ok := c.Parent().(*ast.ValueSpec); ok { - switch c.Name() { - case "Names": - c.InsertAfter(ast.NewIdent("c")) - c.InsertAfter(ast.NewIdent("b")) - case "Values": - c.InsertAfter(&ast.BasicLit{Kind: token.INT, Value: "3"}) - c.InsertAfter(&ast.BasicLit{Kind: token.INT, Value: "2"}) - } - } - return true - }, - }, - - {name: "insert", - orig: `package p - -var ( - x int - y int -) -`, - want: `package p - -var before1 int -var before2 int - -var ( - x int - y int -) -var after2 int -var after1 int -`, - pre: func(c *astutil.Cursor) bool { - if _, ok := c.Node().(*ast.GenDecl); ok { - c.InsertBefore(vardecl("before1", "int")) - c.InsertAfter(vardecl("after1", "int")) - c.InsertAfter(vardecl("after2", "int")) - 
c.InsertBefore(vardecl("before2", "int")) - } - return true - }, - }, - - {name: "delete", - orig: `package p - -var x int -var y int -var z int -`, - want: `package p - -var y int -var z int -`, - pre: func(c *astutil.Cursor) bool { - n := c.Node() - if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" { - c.Delete() - } - return true - }, - }, - - {name: "insertafter-delete", - orig: `package p - -var x int -var y int -var z int -`, - want: `package p - -var x1 int - -var y int -var z int -`, - pre: func(c *astutil.Cursor) bool { - n := c.Node() - if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" { - c.InsertAfter(vardecl("x1", "int")) - c.Delete() - } - return true - }, - }, - - {name: "delete-insertafter", - orig: `package p - -var x int -var y int -var z int -`, - want: `package p - -var y int -var x1 int -var z int -`, - pre: func(c *astutil.Cursor) bool { - n := c.Node() - if d, ok := n.(*ast.GenDecl); ok && d.Specs[0].(*ast.ValueSpec).Names[0].Name == "x" { - c.Delete() - // The cursor is now effectively atop the 'var y int' node. - c.InsertAfter(vardecl("x1", "int")) - } - return true - }, - }, -} - -func valspec(name, typ string) *ast.ValueSpec { - return &ast.ValueSpec{Names: []*ast.Ident{ast.NewIdent(name)}, - Type: ast.NewIdent(typ), - } -} - -func vardecl(name, typ string) *ast.GenDecl { - return &ast.GenDecl{ - Tok: token.VAR, - Specs: []ast.Spec{valspec(name, typ)}, - } -} - -func TestRewrite(t *testing.T) { - t.Run("*", func(t *testing.T) { - for _, test := range rewriteTests { - test := test - t.Run(test.name, func(t *testing.T) { - t.Parallel() - fset := token.NewFileSet() - f, err := parser.ParseFile(fset, test.name, test.orig, parser.ParseComments) - if err != nil { - t.Fatal(err) - } - n := astutil.Apply(f, test.pre, test.post) - var buf bytes.Buffer - if err := format.Node(&buf, fset, n); err != nil { - t.Fatal(err) - } - got := buf.String() - if got != test.want { - t.Errorf("got:\n\n%s\nwant:\n\n%s\n", got, test.want) - } - }) - } - }) -} - -var sink ast.Node - -func BenchmarkRewrite(b *testing.B) { - for _, test := range rewriteTests { - b.Run(test.name, func(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - fset := token.NewFileSet() - f, err := parser.ParseFile(fset, test.name, test.orig, parser.ParseComments) - if err != nil { - b.Fatal(err) - } - b.StartTimer() - sink = astutil.Apply(f, test.pre, test.post) - } - }) - } -} diff --git a/vendor/golang.org/x/tools/go/ast/astutil/util.go b/vendor/golang.org/x/tools/go/ast/astutil/util.go deleted file mode 100644 index 763062982..000000000 --- a/vendor/golang.org/x/tools/go/ast/astutil/util.go +++ /dev/null @@ -1,14 +0,0 @@ -package astutil - -import "go/ast" - -// Unparen returns e with any enclosing parentheses stripped. -func Unparen(e ast.Expr) ast.Expr { - for { - p, ok := e.(*ast.ParenExpr) - if !ok { - return e - } - e = p.X - } -}
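For reference, a minimal, self-contained sketch of how the astutil helpers defined in the files above are driven: AddNamedImport and DeleteImport for import management, Apply with a Cursor for in-place rewriting, and Unparen for stripping parentheses. The sample file name, its contents, and the fmtpkg alias are illustrative assumptions; only signatures documented in the sources above are used.

package main

import (
	"bytes"
	"fmt"
	"go/ast"
	"go/format"
	"go/parser"
	"go/token"

	"golang.org/x/tools/go/ast/astutil"
)

func main() {
	// A tiny illustrative source file; its contents are assumptions for this sketch.
	src := "package main\n\nimport \"os\"\n\nfunc f() int { return (1 + 2) }\n"

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "sample.go", src, parser.ParseComments)
	if err != nil {
		panic(err)
	}

	// Import management: add a renamed import and drop an unused one.
	astutil.AddNamedImport(fset, f, "fmtpkg", "fmt")
	astutil.DeleteImport(fset, f, "os")

	// Tree rewriting: Apply visits every node with a Cursor; the post hook
	// strips the redundant parentheses around the return value via Unparen
	// and swaps the node in place with Replace.
	astutil.Apply(f, nil, func(c *astutil.Cursor) bool {
		if pe, ok := c.Node().(*ast.ParenExpr); ok {
			c.Replace(astutil.Unparen(pe))
		}
		return true
	})

	// Print the rewritten file.
	var buf bytes.Buffer
	if err := format.Node(&buf, fset, f); err != nil {
		panic(err)
	}
	fmt.Println(buf.String())
}

As the Cursor documentation above notes, Replace, Delete, InsertBefore and InsertAfter operate through the cursor on the node's containing field or slice, so edits like the one in the post hook can be made during traversal without disrupting Apply.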