// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
2018-12-05 15:00:36 -07:00
|
|
|
package cache
|
2018-09-27 16:15:45 -06:00
|
|
|
|
|
|
|
import (
|
2018-12-18 14:18:03 -07:00
|
|
|
"context"
|
2018-09-27 16:15:45 -06:00
|
|
|
"fmt"
|
2019-02-06 16:47:00 -07:00
|
|
|
"go/ast"
|
|
|
|
"go/parser"
|
|
|
|
"go/scanner"
|
2018-10-19 14:03:29 -06:00
|
|
|
"go/token"
|
2019-02-06 16:47:00 -07:00
|
|
|
"go/types"
|
2019-02-05 12:22:07 -07:00
|
|
|
"log"
|
2018-09-27 16:15:45 -06:00
|
|
|
"sync"
|
|
|
|
|
2018-10-19 14:03:29 -06:00
|
|
|
"golang.org/x/tools/go/packages"
|
2018-12-05 15:00:36 -07:00
|
|
|
"golang.org/x/tools/internal/lsp/source"
|
2018-09-27 16:15:45 -06:00
|
|
|
)
|
|
|
|
|
2018-11-02 14:15:31 -06:00
|
|
|
// View holds the state for a single view of the workspace: the go/packages
// configuration used to load packages, and a cache of the files and
// analysis results derived from it.
type View struct {
	mu sync.Mutex // protects all mutable state of the view

	// Config is the go/packages configuration for this view. Its Fset and
	// Overlay fields are shared with the parsing and type-checking code in
	// this file.
	Config packages.Config

	// files caches every file known to this view, keyed by URI. Entries are
	// created lazily by getFile.
	files map[source.URI]*File

	// analysisCache lazily holds the analysis results for this view; see
	// GetAnalysisCache.
	analysisCache *source.AnalysisCache
}
|
|
|
|
|
2018-12-18 14:18:03 -07:00
|
|
|
// NewView creates a new View, given a root path and go/packages configuration.
|
|
|
|
// If config is nil, one is created with the directory set to the rootPath.
|
|
|
|
func NewView(config *packages.Config) *View {
|
2018-11-02 14:15:31 -06:00
|
|
|
return &View{
|
2018-12-18 14:18:03 -07:00
|
|
|
Config: *config,
|
|
|
|
files: make(map[source.URI]*File),
|
2018-09-27 16:15:45 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-12-18 14:18:03 -07:00
|
|
|
func (v *View) FileSet() *token.FileSet {
|
|
|
|
return v.Config.Fset
|
|
|
|
}
|
|
|
|
|
|
|
|
// SetContent sets the overlay contents for a file. A nil content value will
|
|
|
|
// remove the file from the active set and revert it to its on-disk contents.
|
|
|
|
func (v *View) SetContent(ctx context.Context, uri source.URI, content []byte) (source.View, error) {
|
|
|
|
v.mu.Lock()
|
|
|
|
defer v.mu.Unlock()
|
|
|
|
|
|
|
|
f := v.getFile(uri)
|
|
|
|
f.content = content
|
|
|
|
|
|
|
|
// Resetting the contents invalidates the ast, token, and pkg fields.
|
|
|
|
f.ast = nil
|
|
|
|
f.token = nil
|
|
|
|
f.pkg = nil
|
|
|
|
|
|
|
|
// We might need to update the overlay.
|
|
|
|
switch {
|
|
|
|
case f.active && content == nil:
|
|
|
|
// The file was active, so we need to forget its content.
|
|
|
|
f.active = false
|
|
|
|
if filename, err := f.URI.Filename(); err == nil {
|
|
|
|
delete(f.view.Config.Overlay, filename)
|
|
|
|
}
|
|
|
|
f.content = nil
|
|
|
|
case content != nil:
|
|
|
|
// This is an active overlay, so we update the map.
|
|
|
|
f.active = true
|
|
|
|
if filename, err := f.URI.Filename(); err == nil {
|
|
|
|
f.view.Config.Overlay[filename] = f.content
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TODO(rstambler): We should really return a new, updated view.
|
|
|
|
return v, nil
|
|
|
|
}
|
|
|
|
|
2018-12-18 13:46:14 -07:00
|
|
|
// GetFile returns a File for the given URI. It will always succeed because it
|
|
|
|
// adds the file to the managed set if needed.
|
2018-12-18 14:18:03 -07:00
|
|
|
func (v *View) GetFile(ctx context.Context, uri source.URI) (source.File, error) {
|
2018-11-02 16:10:49 -06:00
|
|
|
v.mu.Lock()
|
2018-11-05 15:54:12 -07:00
|
|
|
f := v.getFile(uri)
|
|
|
|
v.mu.Unlock()
|
2018-12-18 14:18:03 -07:00
|
|
|
return f, nil
|
2018-11-05 15:54:12 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// getFile is the unlocked internal implementation of GetFile.
|
2018-12-05 15:00:36 -07:00
|
|
|
func (v *View) getFile(uri source.URI) *File {
|
2018-11-02 16:10:49 -06:00
|
|
|
f, found := v.files[uri]
|
|
|
|
if !found {
|
2018-11-05 19:23:02 -07:00
|
|
|
f = &File{
|
|
|
|
URI: uri,
|
|
|
|
view: v,
|
|
|
|
}
|
2018-12-18 14:18:03 -07:00
|
|
|
v.files[uri] = f
|
2018-10-19 14:03:29 -06:00
|
|
|
}
|
2018-11-02 16:10:49 -06:00
|
|
|
return f
|
2018-09-27 16:15:45 -06:00
|
|
|
}
|
2018-10-19 14:03:29 -06:00
|
|
|
|
2018-12-05 15:00:36 -07:00
|
|
|
func (v *View) parse(uri source.URI) error {
|
2018-11-05 15:54:12 -07:00
|
|
|
path, err := uri.Filename()
|
2018-11-02 14:15:31 -06:00
|
|
|
if err != nil {
|
2018-11-05 15:54:12 -07:00
|
|
|
return err
|
2018-11-02 14:15:31 -06:00
|
|
|
}
|
2018-12-18 14:18:03 -07:00
|
|
|
pkgs, err := packages.Load(&v.Config, fmt.Sprintf("file=%s", path))
|
2018-10-19 14:03:29 -06:00
|
|
|
if len(pkgs) == 0 {
|
2018-11-05 19:23:02 -07:00
|
|
|
if err == nil {
|
|
|
|
err = fmt.Errorf("no packages found for %s", path)
|
|
|
|
}
|
2018-11-05 15:54:12 -07:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
for _, pkg := range pkgs {
|
2019-02-06 16:47:00 -07:00
|
|
|
imp := &importer{
|
|
|
|
entries: make(map[string]*entry),
|
|
|
|
packages: make(map[string]*packages.Package),
|
|
|
|
v: v,
|
|
|
|
topLevelPkgPath: pkg.PkgPath,
|
2019-01-07 20:56:24 -07:00
|
|
|
}
|
2019-02-06 16:47:00 -07:00
|
|
|
|
|
|
|
// TODO(rstambler): Get real TypeSizes from go/packages.
|
|
|
|
pkg.TypesSizes = &types.StdSizes{}
|
|
|
|
|
2019-02-06 16:47:00 -07:00
|
|
|
if err := imp.addImports(pkg); err != nil {
|
|
|
|
return err
|
2018-11-05 19:23:02 -07:00
|
|
|
}
|
2019-02-11 11:15:26 -07:00
|
|
|
|
|
|
|
// TODO(rstambler): Get real TypeSizes from go/packages.
|
|
|
|
pkg.TypesSizes = &types.StdSizes{}
|
|
|
|
|
2019-02-06 16:47:00 -07:00
|
|
|
imp.importPackage(pkg.PkgPath)
|
2018-11-05 19:23:02 -07:00
|
|
|
}
|
2019-02-06 16:47:00 -07:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// importer implements types.Importer for the packages loaded by a single
// call to View.parse, memoizing type-check results so that each package is
// type-checked at most once.
type importer struct {
	mu sync.Mutex // guards entries and packages
	// entries memoizes type-check results, keyed by package path; see Import.
	entries map[string]*entry
	// packages holds go/packages metadata for every reachable package,
	// keyed by package path; populated by addImports.
	packages map[string]*packages.Package
	// topLevelPkgPath is the path of the package currently being
	// type-checked; importing it again is reported as a cycle.
	topLevelPkgPath string

	v *View
}
|
|
|
|
|
|
|
|
// entry is a memoization cell for one package's type-check result.
// ready is closed once pkg and err have been populated, signaling any
// waiting importers that the results are safe to read.
type entry struct {
	pkg   *types.Package
	err   error
	ready chan struct{}
}
|
|
|
|
|
|
|
|
func (imp *importer) addImports(pkg *packages.Package) error {
|
|
|
|
imp.packages[pkg.PkgPath] = pkg
|
|
|
|
for _, i := range pkg.Imports {
|
|
|
|
if i.PkgPath == pkg.PkgPath {
|
|
|
|
return fmt.Errorf("import cycle: [%v]", pkg.PkgPath)
|
|
|
|
}
|
|
|
|
if err := imp.addImports(i); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
2019-02-04 15:44:35 -07:00
|
|
|
}
|
2018-11-05 15:54:12 -07:00
|
|
|
return nil
|
2018-11-05 19:23:02 -07:00
|
|
|
}
|
2019-02-06 16:47:00 -07:00
|
|
|
|
|
|
|
// Import implements types.Importer. It returns the type-checked package for
// the given import path, memoizing results in imp.entries so that each
// package is type-checked at most once even when imported from several
// packages concurrently. Waiters block on the entry's ready channel until
// the first importer has populated it.
func (imp *importer) Import(path string) (*types.Package, error) {
	// Importing the package currently being type-checked is a cycle.
	if path == imp.topLevelPkgPath {
		return nil, fmt.Errorf("import cycle: [%v]", path)
	}
	imp.mu.Lock()
	e, ok := imp.entries[path]
	if ok {
		// cache hit
		imp.mu.Unlock()
		// wait for entry to become ready
		<-e.ready
	} else {
		// cache miss
		e = &entry{ready: make(chan struct{})}
		imp.entries[path] = e
		imp.mu.Unlock()

		// This goroutine becomes responsible for populating
		// the entry and broadcasting its readiness.
		e.pkg, e.err = imp.importPackage(path)
		close(e.ready)
	}
	return e.pkg, e.err
}
|
|
|
|
|
|
|
|
// importPackage parses and type-checks the package with the given path,
// using the metadata previously recorded by addImports. Parse and type
// errors are accumulated on pkg.Errors rather than aborting; on success it
// also caches the token.File, AST, and package for every file in the
// package on the view's File entries.
func (imp *importer) importPackage(pkgPath string) (*types.Package, error) {
	imp.mu.Lock()
	pkg, ok := imp.packages[pkgPath]
	imp.mu.Unlock()
	if !ok {
		return nil, fmt.Errorf("no metadata for %v", pkgPath)
	}
	pkg.Fset = imp.v.Config.Fset
	pkg.Syntax = make([]*ast.File, len(pkg.GoFiles))
	for i, filename := range pkg.GoFiles {
		// Prefer overlay (in-editor) contents over the on-disk file.
		var src interface{}
		overlay, ok := imp.v.Config.Overlay[filename]
		if ok {
			src = overlay
		}
		file, err := parser.ParseFile(imp.v.Config.Fset, filename, src, parser.AllErrors|parser.ParseComments)
		if file == nil {
			return nil, err
		}
		// A non-nil error with a non-nil file means a partial AST was
		// produced; record the error and keep going — the partial AST is
		// still useful to the type checker.
		if err != nil {
			switch err := err.(type) {
			case *scanner.Error:
				pkg.Errors = append(pkg.Errors, packages.Error{
					Pos: err.Pos.String(),
					Msg: err.Msg,
					Kind: packages.ParseError,
				})
			case scanner.ErrorList:
				// The first parser error is likely the root cause of the problem.
				if err.Len() > 0 {
					pkg.Errors = append(pkg.Errors, packages.Error{
						Pos: err[0].Pos.String(),
						Msg: err[0].Msg,
						Kind: packages.ParseError,
					})
				}
			}
		}
		pkg.Syntax[i] = file
	}
	cfg := &types.Config{
		// Collect type errors on the package instead of stopping the check.
		Error: func(err error) {
			if err, ok := err.(types.Error); ok {
				pkg.Errors = append(pkg.Errors, packages.Error{
					Pos: imp.v.Config.Fset.Position(err.Pos).String(),
					Msg: err.Msg,
					Kind: packages.TypeError,
				})
			}
		},
		Importer: imp,
	}
	pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
	pkg.TypesInfo = &types.Info{
		Types: make(map[ast.Expr]types.TypeAndValue),
		Defs: make(map[*ast.Ident]types.Object),
		Uses: make(map[*ast.Ident]types.Object),
		Implicits: make(map[ast.Node]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
		Scopes: make(map[ast.Node]*types.Scope),
	}
	check := types.NewChecker(cfg, imp.v.Config.Fset, pkg.Types, pkg.TypesInfo)
	// The checker's error is ignored on purpose: errors are surfaced
	// through cfg.Error above.
	check.Files(pkg.Syntax)

	// Add every file in this package to our cache.
	for _, file := range pkg.Syntax {
		// TODO: If a file is in multiple packages, which package do we store?
		if !file.Pos().IsValid() {
			log.Printf("invalid position for file %v", file.Name)
			continue
		}
		tok := imp.v.Config.Fset.File(file.Pos())
		if tok == nil {
			log.Printf("no token.File for %v", file.Name)
			continue
		}
		fURI := source.ToURI(tok.Name())
		// NOTE(review): getFile is documented as requiring v.mu to be
		// held, but it is called here without the lock — confirm.
		f := imp.v.getFile(fURI)
		f.token = tok
		f.ast = file
		f.pkg = pkg
	}
	return pkg.Types, nil
}
|
2019-02-06 16:47:00 -07:00
|
|
|
|
|
|
|
func (v *View) GetAnalysisCache() *source.AnalysisCache {
|
|
|
|
if v.analysisCache == nil {
|
|
|
|
v.analysisCache = source.NewAnalysisCache()
|
|
|
|
}
|
|
|
|
return v.analysisCache
|
|
|
|
}
|