2019-05-10 14:35:43 -06:00
|
|
|
// Copyright 2019 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
2019-04-22 14:04:44 -06:00
|
|
|
package cache
|
|
|
|
|
|
|
|
import (
|
2019-07-09 15:52:23 -06:00
|
|
|
"bytes"
|
2019-04-22 14:04:44 -06:00
|
|
|
"context"
|
|
|
|
"go/ast"
|
|
|
|
"go/parser"
|
|
|
|
"go/scanner"
|
|
|
|
"go/token"
|
2019-09-06 15:22:54 -06:00
|
|
|
"reflect"
|
2019-04-22 14:04:44 -06:00
|
|
|
|
2019-09-09 22:36:39 -06:00
|
|
|
"golang.org/x/tools/internal/lsp/protocol"
|
2019-06-04 20:14:37 -06:00
|
|
|
"golang.org/x/tools/internal/lsp/source"
|
2019-07-09 18:16:21 -06:00
|
|
|
"golang.org/x/tools/internal/lsp/telemetry"
|
2019-06-04 20:14:37 -06:00
|
|
|
"golang.org/x/tools/internal/memoize"
|
2019-09-09 22:36:39 -06:00
|
|
|
"golang.org/x/tools/internal/span"
|
2019-08-13 13:07:39 -06:00
|
|
|
"golang.org/x/tools/internal/telemetry/log"
|
|
|
|
"golang.org/x/tools/internal/telemetry/trace"
|
2019-08-06 13:13:11 -06:00
|
|
|
errors "golang.org/x/xerrors"
|
2019-04-22 14:04:44 -06:00
|
|
|
)
|
|
|
|
|
2019-08-15 06:54:27 -06:00
|
|
|
// Limits the number of parallel parser calls per process: a slot is
// acquired (channel send) before each parse and released (receive)
// afterwards, capping concurrency at the channel's capacity of 20.
var parseLimit = make(chan struct{}, 20)
|
2019-06-10 06:58:14 -06:00
|
|
|
|
2019-06-13 13:55:53 -06:00
|
|
|
// parseKey uniquely identifies a parsed Go file.
type parseKey struct {
	file source.FileIdentity // identity of the parsed file's contents
	mode source.ParseMode    // the mode the file was parsed in
}
|
|
|
|
|
|
|
|
// parseGoHandle is the cache's implementation of source.ParseGoHandle:
// a memoized handle to the parse of a single file in a single mode.
type parseGoHandle struct {
	handle *memoize.Handle   // memoized parse; its value is a *parseGoData
	file   source.FileHandle // the file to be parsed
	mode   source.ParseMode  // how much of the file to parse
}
|
|
|
|
|
|
|
|
// parseGoData is the memoized result of parsing one Go file in a
// particular mode; it is the value produced by the function bound in
// ParseGoHandle and unpacked by Parse and Cached.
type parseGoData struct {
	memoize.NoCopy

	ast        *ast.File              // the parsed AST, possibly partial; nil on fatal error
	parseError error                  // errors associated with parsing the file
	mapper     *protocol.ColumnMapper // position conversion for the parsed file's contents
	err        error                  // fatal error: read failure, no AST, or no token.File
}
|
|
|
|
|
2019-06-13 13:55:53 -06:00
|
|
|
func (c *cache) ParseGoHandle(fh source.FileHandle, mode source.ParseMode) source.ParseGoHandle {
|
2019-06-04 20:14:37 -06:00
|
|
|
key := parseKey{
|
|
|
|
file: fh.Identity(),
|
|
|
|
mode: mode,
|
|
|
|
}
|
|
|
|
h := c.store.Bind(key, func(ctx context.Context) interface{} {
|
|
|
|
data := &parseGoData{}
|
2019-09-17 09:19:11 -06:00
|
|
|
data.ast, data.mapper, data.parseError, data.err = parseGo(ctx, c, fh, mode)
|
2019-06-04 20:14:37 -06:00
|
|
|
return data
|
|
|
|
})
|
|
|
|
return &parseGoHandle{
|
|
|
|
handle: h,
|
2019-06-21 15:00:02 -06:00
|
|
|
file: fh,
|
|
|
|
mode: mode,
|
2019-06-04 20:14:37 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// File returns the underlying file handle that this handle parses.
func (h *parseGoHandle) File() source.FileHandle {
	return h.file
}
|
|
|
|
|
|
|
|
// Mode returns the parse mode this handle was created with.
func (h *parseGoHandle) Mode() source.ParseMode {
	return h.mode
}
|
|
|
|
|
2019-09-17 09:19:11 -06:00
|
|
|
func (h *parseGoHandle) Parse(ctx context.Context) (*ast.File, *protocol.ColumnMapper, error, error) {
|
2019-06-04 20:14:37 -06:00
|
|
|
v := h.handle.Get(ctx)
|
|
|
|
if v == nil {
|
2019-10-01 13:21:06 -06:00
|
|
|
return nil, nil, nil, errors.Errorf("no parsed file for %s", h.File().Identity().URI)
|
2019-06-04 20:14:37 -06:00
|
|
|
}
|
|
|
|
data := v.(*parseGoData)
|
2019-09-17 09:19:11 -06:00
|
|
|
return data.ast, data.mapper, data.parseError, data.err
|
2019-06-04 20:14:37 -06:00
|
|
|
}
|
|
|
|
|
2019-10-24 13:44:41 -06:00
|
|
|
func (h *parseGoHandle) Cached() (*ast.File, *protocol.ColumnMapper, error, error) {
|
2019-08-06 16:51:17 -06:00
|
|
|
v := h.handle.Cached()
|
|
|
|
if v == nil {
|
2019-09-17 09:19:11 -06:00
|
|
|
return nil, nil, nil, errors.Errorf("no cached AST for %s", h.file.Identity().URI)
|
2019-08-06 16:51:17 -06:00
|
|
|
}
|
|
|
|
data := v.(*parseGoData)
|
2019-09-17 09:19:11 -06:00
|
|
|
return data.ast, data.mapper, data.parseError, data.err
|
2019-08-06 16:51:17 -06:00
|
|
|
}
|
|
|
|
|
2019-07-09 15:52:23 -06:00
|
|
|
func hashParseKey(ph source.ParseGoHandle) string {
|
|
|
|
b := bytes.NewBuffer(nil)
|
|
|
|
b.WriteString(ph.File().Identity().String())
|
|
|
|
b.WriteString(string(ph.Mode()))
|
|
|
|
return hashContents(b.Bytes())
|
|
|
|
}
|
|
|
|
|
|
|
|
func hashParseKeys(phs []source.ParseGoHandle) string {
|
|
|
|
b := bytes.NewBuffer(nil)
|
|
|
|
for _, ph := range phs {
|
|
|
|
b.WriteString(hashParseKey(ph))
|
|
|
|
}
|
|
|
|
return hashContents(b.Bytes())
|
|
|
|
}
|
|
|
|
|
2019-09-17 09:19:11 -06:00
|
|
|
func parseGo(ctx context.Context, c *cache, fh source.FileHandle, mode source.ParseMode) (file *ast.File, mapper *protocol.ColumnMapper, parseError error, err error) {
|
2019-07-09 18:16:21 -06:00
|
|
|
ctx, done := trace.StartSpan(ctx, "cache.parseGo", telemetry.File.Of(fh.Identity().URI.Filename()))
|
2019-06-26 20:46:12 -06:00
|
|
|
defer done()
|
2019-08-08 10:18:43 -06:00
|
|
|
|
2019-06-04 20:14:37 -06:00
|
|
|
buf, _, err := fh.Read(ctx)
|
|
|
|
if err != nil {
|
2019-09-17 09:19:11 -06:00
|
|
|
return nil, nil, nil, err
|
2019-06-04 20:14:37 -06:00
|
|
|
}
|
2019-08-15 06:54:27 -06:00
|
|
|
parseLimit <- struct{}{}
|
|
|
|
defer func() { <-parseLimit }()
|
2019-06-04 20:14:37 -06:00
|
|
|
parserMode := parser.AllErrors | parser.ParseComments
|
|
|
|
if mode == source.ParseHeader {
|
2019-08-02 17:45:56 -06:00
|
|
|
parserMode = parser.ImportsOnly | parser.ParseComments
|
2019-06-04 20:14:37 -06:00
|
|
|
}
|
2019-09-17 09:19:11 -06:00
|
|
|
file, parseError = parser.ParseFile(c.fset, fh.Identity().URI.Filename(), buf, parserMode)
|
|
|
|
if file != nil {
|
2019-06-10 06:58:14 -06:00
|
|
|
if mode == source.ParseExported {
|
2019-09-17 09:19:11 -06:00
|
|
|
trimAST(file)
|
2019-06-10 06:58:14 -06:00
|
|
|
}
|
|
|
|
// Fix any badly parsed parts of the AST.
|
2019-09-17 09:19:11 -06:00
|
|
|
tok := c.fset.File(file.Pos())
|
|
|
|
if err := fix(ctx, file, tok, buf); err != nil {
|
2019-08-14 08:57:47 -06:00
|
|
|
log.Error(ctx, "failed to fix AST", err)
|
2019-06-10 06:58:14 -06:00
|
|
|
}
|
2019-06-04 20:14:37 -06:00
|
|
|
}
|
2019-09-18 10:48:40 -06:00
|
|
|
|
2019-09-17 09:19:11 -06:00
|
|
|
if file == nil {
|
2019-09-18 10:48:40 -06:00
|
|
|
// If the file is nil only due to parse errors,
|
|
|
|
// the parse errors are the actual errors.
|
|
|
|
err := parseError
|
|
|
|
if err == nil {
|
|
|
|
err = errors.Errorf("no AST for %s", fh.Identity().URI)
|
|
|
|
}
|
|
|
|
return nil, nil, parseError, err
|
2019-09-17 09:19:11 -06:00
|
|
|
}
|
|
|
|
tok := c.FileSet().File(file.Pos())
|
|
|
|
if tok == nil {
|
2019-09-18 10:48:40 -06:00
|
|
|
return nil, nil, parseError, errors.Errorf("no token.File for %s", fh.Identity().URI)
|
2019-09-17 09:19:11 -06:00
|
|
|
}
|
|
|
|
uri := fh.Identity().URI
|
|
|
|
content, _, err := fh.Read(ctx)
|
|
|
|
if err != nil {
|
|
|
|
return nil, nil, parseError, err
|
|
|
|
}
|
|
|
|
m := &protocol.ColumnMapper{
|
|
|
|
URI: uri,
|
|
|
|
Converter: span.NewTokenConverter(c.FileSet(), tok),
|
|
|
|
Content: content,
|
2019-04-22 14:04:44 -06:00
|
|
|
}
|
2019-09-17 09:19:11 -06:00
|
|
|
return file, m, parseError, nil
|
2019-04-22 14:04:44 -06:00
|
|
|
}
|
|
|
|
|
2019-05-23 11:51:56 -06:00
|
|
|
// trimAST clears any part of the AST not relevant to type checking
|
|
|
|
// expressions at pos.
|
|
|
|
func trimAST(file *ast.File) {
|
|
|
|
ast.Inspect(file, func(n ast.Node) bool {
|
|
|
|
if n == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
switch n := n.(type) {
|
|
|
|
case *ast.FuncDecl:
|
|
|
|
n.Body = nil
|
|
|
|
case *ast.BlockStmt:
|
|
|
|
n.List = nil
|
|
|
|
case *ast.CaseClause:
|
|
|
|
n.Body = nil
|
|
|
|
case *ast.CommClause:
|
|
|
|
n.Body = nil
|
|
|
|
case *ast.CompositeLit:
|
|
|
|
// Leave elts in place for [...]T
|
|
|
|
// array literals, because they can
|
|
|
|
// affect the expression's type.
|
|
|
|
if !isEllipsisArray(n.Type) {
|
|
|
|
n.Elts = nil
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return true
|
|
|
|
})
|
|
|
|
}
|
2019-04-22 14:04:44 -06:00
|
|
|
|
2019-05-23 11:51:56 -06:00
|
|
|
func isEllipsisArray(n ast.Expr) bool {
|
|
|
|
at, ok := n.(*ast.ArrayType)
|
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
_, ok = at.Len.(*ast.Ellipsis)
|
|
|
|
return ok
|
|
|
|
}
|
|
|
|
|
2019-07-27 18:52:02 -06:00
|
|
|
// fix inspects the AST and potentially modifies any *ast.BadStmts so that it can be
// type-checked more effectively.
//
// It walks n looking for *ast.BadStmt and *ast.BadExpr nodes, attempts
// to replace them with well-formed nodes (via fixDeferOrGoStmt and
// fixArrayType), and on success recursively fixes the repaired parent.
// tok and src must correspond to the file n was parsed from.
func fix(ctx context.Context, n ast.Node, tok *token.File, src []byte) error {
	var (
		// ancestors is the stack of nodes enclosing the node currently
		// being visited; parent is the innermost enclosing node.
		ancestors []ast.Node
		parent    ast.Node
		err       error
	)
	ast.Inspect(n, func(n ast.Node) bool {
		if n == nil {
			// A nil visit signals the end of a node's children:
			// pop the stack and restore parent accordingly.
			if len(ancestors) > 0 {
				ancestors = ancestors[:len(ancestors)-1]
				if len(ancestors) > 0 {
					parent = ancestors[len(ancestors)-1]
				}
			}
			return false
		}

		switch n := n.(type) {
		case *ast.BadStmt:
			err = fixDeferOrGoStmt(n, parent, tok, src) // don't shadow err
			if err == nil {
				// Recursively fix in our fixed node.
				err = fix(ctx, parent, tok, src)
			} else {
				err = errors.Errorf("unable to parse defer or go from *ast.BadStmt: %v", err)
			}
			return false
		case *ast.BadExpr:
			// Don't propagate this error since *ast.BadExpr is very common
			// and it is only sometimes due to array types. Errors from here
			// are expected and not actionable in general.
			fixArrayErr := fixArrayType(n, parent, tok, src)
			if fixArrayErr == nil {
				// Recursively fix in our fixed node.
				err = fix(ctx, parent, tok, src)
			}
			return false
		default:
			// Ordinary node: push it and descend into its children.
			ancestors = append(ancestors, n)
			parent = n
			return true
		}
	})
	return err
}
|
|
|
|
|
2019-09-13 11:31:28 -06:00
|
|
|
// fixArrayType tries to parse an *ast.BadExpr into an *ast.ArrayType.
// go/parser often turns lone array types like "[]int" into BadExprs
// if it isn't expecting a type.
//
// bad is the node to repair, parent is the node containing it, and
// tok/src are the token.File and contents of the file bad came from.
// On success, bad is replaced by the recovered *ast.ArrayType inside
// parent.
func fixArrayType(bad *ast.BadExpr, parent ast.Node, tok *token.File, src []byte) error {
	// Our expected input is a bad expression that looks like "[]someExpr".

	from := bad.Pos()
	to := bad.End()

	if !from.IsValid() || !to.IsValid() {
		return errors.Errorf("invalid BadExpr from/to: %d/%d", from, to)
	}

	// +3 leaves room for the phantom "_" and the appended "{}".
	exprBytes := make([]byte, 0, int(to-from)+3)
	// Avoid doing tok.Offset(to) since that panics if badExpr ends at EOF.
	exprBytes = append(exprBytes, src[tok.Offset(from):tok.Offset(to-1)+1]...)
	exprBytes = bytes.TrimSpace(exprBytes)

	// If our expression ends in "]" (e.g. "[]"), add a phantom selector
	// so we can complete directly after the "[]".
	if len(exprBytes) > 0 && exprBytes[len(exprBytes)-1] == ']' {
		exprBytes = append(exprBytes, '_')
	}

	// Add "{}" to turn our ArrayType into a CompositeLit. This is to
	// handle the case of "[...]int" where we must make it a composite
	// literal to be parseable.
	exprBytes = append(exprBytes, '{', '}')

	expr, err := parseExpr(from, exprBytes)
	if err != nil {
		return err
	}

	cl, _ := expr.(*ast.CompositeLit)
	if cl == nil {
		return errors.Errorf("expr not compLit (%T)", expr)
	}

	at, _ := cl.Type.(*ast.ArrayType)
	if at == nil {
		return errors.Errorf("compLit type not array (%T)", cl.Type)
	}

	if !replaceNode(parent, bad, at) {
		return errors.Errorf("couldn't replace array type")
	}

	return nil
}
|
|
|
|
|
|
|
|
// fixDeferOrGoStmt tries to parse an *ast.BadStmt into a defer or a go statement.
//
// go/parser packages a statement of the form "defer x." as an *ast.BadStmt because
// it does not include a call expression. This means that go/types skips type-checking
// this statement entirely, and we can't use the type information when completing.
// Here, we try to generate a fake *ast.DeferStmt or *ast.GoStmt to put into the AST,
// instead of the *ast.BadStmt.
//
// bad is the node to repair, parent is the node containing it, and
// tok/src are the token.File and contents of the file bad came from.
// On success, bad is replaced inside parent by the synthesized statement.
func fixDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src []byte) error {
	// Check if we have a bad statement containing either a "go" or "defer".
	s := &scanner.Scanner{}
	s.Init(tok, src, nil, 0)

	// Scan forward until we reach the start of the bad statement; pos/tkn
	// end up holding the first token at or after bad.From.
	var (
		pos token.Pos
		tkn token.Token
	)
	for {
		if tkn == token.EOF {
			return errors.Errorf("reached the end of the file")
		}
		if pos >= bad.From {
			break
		}
		pos, tkn, _ = s.Scan()
	}

	var stmt ast.Stmt
	switch tkn {
	case token.DEFER:
		stmt = &ast.DeferStmt{
			Defer: pos,
		}
	case token.GO:
		stmt = &ast.GoStmt{
			Go: pos,
		}
	default:
		return errors.Errorf("no defer or go statement found")
	}

	var (
		// from/to delimit the expression following the keyword; last and
		// lastToken remember the previous token for dangling-"." detection.
		from, to, last   token.Pos
		lastToken        token.Token
		braceDepth       int
		phantomSelectors []token.Pos
	)
FindTo:
	for {
		to, tkn, _ = s.Scan()

		if from == token.NoPos {
			from = to
		}

		switch tkn {
		case token.EOF:
			break FindTo
		case token.SEMICOLON:
			// If we aren't in nested braces, end of statement means
			// end of expression.
			if braceDepth == 0 {
				break FindTo
			}
		case token.LBRACE:
			braceDepth++
		}

		// This handles the common dangling selector case. For example in
		//
		// defer fmt.
		// y := 1
		//
		// we notice the dangling period and end our expression.
		//
		// If the previous token was a "." and we are looking at a "}",
		// the period is likely a dangling selector and needs a phantom
		// "_". Likewise if the current token is on a different line than
		// the period, the period is likely a dangling selector.
		if lastToken == token.PERIOD && (tkn == token.RBRACE || tok.Line(to) > tok.Line(last)) {
			// Insert phantom "_" selector after the dangling ".".
			phantomSelectors = append(phantomSelectors, last+1)
			// If we aren't in a block then end the expression after the ".".
			if braceDepth == 0 {
				to = last + 1
				// Note: this bare break exits the FindTo loop.
				break
			}
		}

		lastToken = tkn
		last = to

		switch tkn {
		case token.RBRACE:
			braceDepth--
			if braceDepth <= 0 {
				if braceDepth == 0 {
					// +1 to include the "}" itself.
					to += 1
				}
				break FindTo
			}
		}
	}

	if !from.IsValid() || tok.Offset(from) >= len(src) {
		return errors.Errorf("invalid from position")
	}

	if !to.IsValid() || tok.Offset(to) >= len(src) {
		return errors.Errorf("invalid to position %d", to)
	}

	// Insert any phantom selectors needed to prevent dangling "." from messing
	// up the AST.
	exprBytes := make([]byte, 0, int(to-from)+len(phantomSelectors))
	for i, b := range src[tok.Offset(from):tok.Offset(to)] {
		if len(phantomSelectors) > 0 && from+token.Pos(i) == phantomSelectors[0] {
			exprBytes = append(exprBytes, '_')
			phantomSelectors = phantomSelectors[1:]
		}
		exprBytes = append(exprBytes, b)
	}

	// A phantom selector after the final "." falls past the copied range;
	// append it here.
	if len(phantomSelectors) > 0 {
		exprBytes = append(exprBytes, '_')
	}

	expr, err := parseExpr(from, exprBytes)
	if err != nil {
		return err
	}

	// Package the expression into a fake *ast.CallExpr and re-insert
	// into the function.
	call := &ast.CallExpr{
		Fun:    expr,
		Lparen: to,
		Rparen: to,
	}

	switch stmt := stmt.(type) {
	case *ast.DeferStmt:
		stmt.Call = call
	case *ast.GoStmt:
		stmt.Call = call
	}

	if !replaceNode(parent, bad, stmt) {
		return errors.Errorf("couldn't replace CallExpr")
	}

	return nil
}
|
|
|
|
|
|
|
|
// parseExpr parses the expression in src and updates its position to
|
|
|
|
// start at pos.
|
|
|
|
func parseExpr(pos token.Pos, src []byte) (ast.Expr, error) {
|
2019-09-09 10:25:22 -06:00
|
|
|
// Wrap our expression to make it a valid Go file we can pass to ParseFile.
|
|
|
|
fileSrc := bytes.Join([][]byte{
|
|
|
|
[]byte("package fake;func _(){"),
|
2019-09-13 11:31:28 -06:00
|
|
|
src,
|
2019-09-09 10:25:22 -06:00
|
|
|
[]byte("}"),
|
|
|
|
}, nil)
|
|
|
|
|
|
|
|
// Use ParseFile instead of ParseExpr because ParseFile has
|
|
|
|
// best-effort behavior, whereas ParseExpr fails hard on any error.
|
|
|
|
fakeFile, err := parser.ParseFile(token.NewFileSet(), "", fileSrc, 0)
|
|
|
|
if fakeFile == nil {
|
2019-09-13 11:31:28 -06:00
|
|
|
return nil, errors.Errorf("error reading fake file source: %v", err)
|
2019-09-09 10:25:22 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
// Extract our expression node from inside the fake file.
|
|
|
|
if len(fakeFile.Decls) == 0 {
|
2019-09-13 11:31:28 -06:00
|
|
|
return nil, errors.Errorf("error parsing fake file: %v", err)
|
2019-09-09 10:25:22 -06:00
|
|
|
}
|
2019-09-13 11:31:28 -06:00
|
|
|
|
2019-09-09 10:25:22 -06:00
|
|
|
fakeDecl, _ := fakeFile.Decls[0].(*ast.FuncDecl)
|
|
|
|
if fakeDecl == nil || len(fakeDecl.Body.List) == 0 {
|
2019-09-13 11:31:28 -06:00
|
|
|
return nil, errors.Errorf("no statement in %s: %v", src, err)
|
2019-09-09 10:25:22 -06:00
|
|
|
}
|
2019-09-13 11:31:28 -06:00
|
|
|
|
2019-09-09 10:25:22 -06:00
|
|
|
exprStmt, ok := fakeDecl.Body.List[0].(*ast.ExprStmt)
|
|
|
|
if !ok {
|
2019-09-13 11:31:28 -06:00
|
|
|
return nil, errors.Errorf("no expr in %s: %v", src, err)
|
2019-04-22 14:04:44 -06:00
|
|
|
}
|
2019-09-13 11:31:28 -06:00
|
|
|
|
2019-09-09 10:25:22 -06:00
|
|
|
expr := exprStmt.X
|
2019-09-06 15:22:54 -06:00
|
|
|
|
2019-04-22 14:04:44 -06:00
|
|
|
// parser.ParseExpr returns undefined positions.
|
|
|
|
// Adjust them for the current file.
|
2019-09-13 11:31:28 -06:00
|
|
|
offsetPositions(expr, pos-1-(expr.Pos()-1))
|
2019-04-22 14:04:44 -06:00
|
|
|
|
2019-09-13 11:31:28 -06:00
|
|
|
return expr, nil
|
2019-04-22 14:04:44 -06:00
|
|
|
}
|
|
|
|
|
2019-09-06 15:22:54 -06:00
|
|
|
// tokenPosType caches the reflect.Type of token.Pos, so position
// fields in AST node structs can be recognized by type.
var tokenPosType = reflect.TypeOf(token.NoPos)
|
|
|
|
|
2019-04-22 14:04:44 -06:00
|
|
|
// offsetPositions applies an offset to the positions in an ast.Node.
|
2019-06-10 06:58:14 -06:00
|
|
|
func offsetPositions(expr ast.Expr, offset token.Pos) {
|
2019-04-22 14:04:44 -06:00
|
|
|
ast.Inspect(expr, func(n ast.Node) bool {
|
2019-09-06 15:22:54 -06:00
|
|
|
if n == nil {
|
2019-04-22 14:04:44 -06:00
|
|
|
return false
|
|
|
|
}
|
2019-09-06 15:22:54 -06:00
|
|
|
|
|
|
|
v := reflect.ValueOf(n).Elem()
|
|
|
|
|
|
|
|
switch v.Kind() {
|
|
|
|
case reflect.Struct:
|
|
|
|
for i := 0; i < v.NumField(); i++ {
|
|
|
|
f := v.Field(i)
|
|
|
|
if f.Type() != tokenPosType {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
if !f.CanSet() {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
2019-09-13 11:31:28 -06:00
|
|
|
f.SetInt(f.Int() + int64(offset))
|
2019-09-06 15:22:54 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return true
|
2019-04-22 14:04:44 -06:00
|
|
|
})
|
|
|
|
}
|
2019-09-17 22:20:55 -06:00
|
|
|
|
|
|
|
// replaceNode updates parent's child oldChild to be newChild. It
|
|
|
|
// retuns whether it replaced successfully.
|
|
|
|
func replaceNode(parent, oldChild, newChild ast.Node) bool {
|
|
|
|
if parent == nil || oldChild == nil || newChild == nil {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
parentVal := reflect.ValueOf(parent).Elem()
|
|
|
|
if parentVal.Kind() != reflect.Struct {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
newChildVal := reflect.ValueOf(newChild)
|
|
|
|
|
|
|
|
tryReplace := func(v reflect.Value) bool {
|
|
|
|
if !v.CanSet() || !v.CanInterface() {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the existing value is oldChild, we found our child. Make
|
|
|
|
// sure our newChild is assignable and then make the swap.
|
|
|
|
if v.Interface() == oldChild && newChildVal.Type().AssignableTo(v.Type()) {
|
|
|
|
v.Set(newChildVal)
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
|
|
|
// Loop over parent's struct fields.
|
|
|
|
for i := 0; i < parentVal.NumField(); i++ {
|
|
|
|
f := parentVal.Field(i)
|
|
|
|
|
|
|
|
switch f.Kind() {
|
|
|
|
// Check interface and pointer fields.
|
|
|
|
case reflect.Interface, reflect.Ptr:
|
|
|
|
if tryReplace(f) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
|
|
|
|
// Search through any slice fields.
|
|
|
|
case reflect.Slice:
|
|
|
|
for i := 0; i < f.Len(); i++ {
|
|
|
|
if tryReplace(f.Index(i)) {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return false
|
|
|
|
}
|