From d56157ae751d43fa5668eee3f2bfe4d8087cf660 Mon Sep 17 00:00:00 2001
From: Rebecca Stambler
Date: Mon, 4 Mar 2019 17:01:39 -0500
Subject: [PATCH] internal/lsp: remove handling for circular imports

Also, separate type-checking logic into its own file. go/packages returns
import cycle errors anyway, so we just return them instead.

Change-Id: I1f524cdf81e1f9655c1b0afd50dd2aeaa167bb2f
Reviewed-on: https://go-review.googlesource.com/c/tools/+/165021
Reviewed-by: Michael Matloob
---
 internal/lsp/cache/check.go           | 307 ++++++++++++++++++++++++++
 internal/lsp/cache/view.go            | 298 -------------------------
 internal/lsp/lsp_test.go              |   2 +-
 internal/lsp/source/diagnostics.go    |   2 +-
 internal/lsp/testdata/self/self.go.in |  13 --
 5 files changed, 309 insertions(+), 313 deletions(-)
 create mode 100644 internal/lsp/cache/check.go
 delete mode 100644 internal/lsp/testdata/self/self.go.in

diff --git a/internal/lsp/cache/check.go b/internal/lsp/cache/check.go
new file mode 100644
index 00000000000..fbfbe94590e
--- /dev/null
+++ b/internal/lsp/cache/check.go
@@ -0,0 +1,307 @@
+package cache
+
+import (
+	"fmt"
+	"go/ast"
+	"go/scanner"
+	"go/types"
+	"io/ioutil"
+	"log"
+	"os"
+	"path/filepath"
+	"strings"
+	"sync"
+
+	"golang.org/x/tools/go/packages"
+	"golang.org/x/tools/internal/lsp/source"
+)
+
+func (v *View) parse(uri source.URI) error {
+	path, err := uri.Filename()
+	if err != nil {
+		return err
+	}
+	// TODO(rstambler): Enforce here that LoadMode is LoadImports?
+	pkgs, err := packages.Load(&v.Config, fmt.Sprintf("file=%s", path))
+	if len(pkgs) == 0 {
+		if err == nil {
+			err = fmt.Errorf("no packages found for %s", path)
+		}
+		return err
+	}
+	var foundPkg bool // true if we found the package for uri
+	for _, pkg := range pkgs {
+		for _, err := range pkg.Errors {
+			switch err.Kind {
+			case packages.UnknownError, packages.ListError:
+				return err
+			}
+		}
+		// TODO(rstambler): Get real TypeSizes from go/packages (golang.org/issues/30139).
+		pkg.TypesSizes = &types.StdSizes{}
+
+		imp := &importer{
+			entries:  make(map[string]*entry),
+			packages: make(map[string]*packages.Package),
+			v:        v,
+		}
+		if err := imp.addImports(pkg.PkgPath, pkg); err != nil {
+			return err
+		}
+		// Start prefetching direct imports.
+		for importPath := range pkg.Imports {
+			go imp.Import(importPath)
+		}
+		imp.importPackage(pkg.PkgPath)
+
+		// Add every file in this package to our cache.
+		for _, file := range pkg.Syntax {
+			// TODO: If a file is in multiple packages, which package do we store?
+			if !file.Pos().IsValid() {
+				log.Printf("invalid position for file %v", file.Name)
+				continue
+			}
+			tok := imp.v.Config.Fset.File(file.Pos())
+			if tok == nil {
+				log.Printf("no token.File for %v", file.Name)
+				continue
+			}
+			fURI := source.ToURI(tok.Name())
+			if fURI == uri {
+				foundPkg = true
+			}
+			f := imp.v.getFile(fURI)
+			f.token = tok
+			f.ast = file
+			f.pkg = pkg
+		}
+	}
+	if !foundPkg {
+		return fmt.Errorf("no package found for %v", uri)
+	}
+	return nil
+}
+
+type importer struct {
+	mu       sync.Mutex
+	entries  map[string]*entry
+	packages map[string]*packages.Package
+
+	v *View
+}
+
+type entry struct {
+	pkg   *types.Package
+	err   error
+	ready chan struct{}
+}
+
+func (imp *importer) addImports(path string, pkg *packages.Package) error {
+	if _, ok := imp.packages[path]; ok {
+		return nil
+	}
+	imp.packages[path] = pkg
+	for importPath, importPkg := range pkg.Imports {
+		if err := imp.addImports(importPath, importPkg); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (imp *importer) Import(path string) (*types.Package, error) {
+	imp.mu.Lock()
+	e, ok := imp.entries[path]
+	if ok {
+		// cache hit
+		imp.mu.Unlock()
+		// wait for entry to become ready
+		<-e.ready
+	} else {
+		// cache miss
+		e = &entry{ready: make(chan struct{})}
+		imp.entries[path] = e
+		imp.mu.Unlock()
+
+		// This goroutine becomes responsible for populating
+		// the entry and broadcasting its readiness.
+		e.pkg, e.err = imp.importPackage(path)
+		close(e.ready)
+	}
+	return e.pkg, e.err
+}
+
+func (imp *importer) importPackage(pkgPath string) (*types.Package, error) {
+	imp.mu.Lock()
+	pkg, ok := imp.packages[pkgPath]
+	imp.mu.Unlock()
+	if !ok {
+		return nil, fmt.Errorf("no metadata for %v", pkgPath)
+	}
+	pkg.Fset = imp.v.Config.Fset
+	appendError := func(err error) {
+		imp.appendPkgError(pkg, err)
+	}
+	files, errs := imp.parseFiles(pkg.CompiledGoFiles)
+	for _, err := range errs {
+		appendError(err)
+	}
+	pkg.Syntax = files
+	cfg := &types.Config{
+		Error:    appendError,
+		Importer: imp,
+	}
+	pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
+	pkg.TypesInfo = &types.Info{
+		Types:      make(map[ast.Expr]types.TypeAndValue),
+		Defs:       make(map[*ast.Ident]types.Object),
+		Uses:       make(map[*ast.Ident]types.Object),
+		Implicits:  make(map[ast.Node]types.Object),
+		Selections: make(map[*ast.SelectorExpr]*types.Selection),
+		Scopes:     make(map[ast.Node]*types.Scope),
+	}
+	check := types.NewChecker(cfg, imp.v.Config.Fset, pkg.Types, pkg.TypesInfo)
+	check.Files(pkg.Syntax)
+
+	return pkg.Types, nil
+}
+
+func (imp *importer) appendPkgError(pkg *packages.Package, err error) {
+	if err == nil {
+		return
+	}
+	var errs []packages.Error
+	switch err := err.(type) {
+	case *scanner.Error:
+		errs = append(errs, packages.Error{
+			Pos:  err.Pos.String(),
+			Msg:  err.Msg,
+			Kind: packages.ParseError,
+		})
+	case scanner.ErrorList:
+		// The first parser error is likely the root cause of the problem.
+		if err.Len() > 0 {
+			errs = append(errs, packages.Error{
+				Pos:  err[0].Pos.String(),
+				Msg:  err[0].Msg,
+				Kind: packages.ParseError,
+			})
+		}
+	case types.Error:
+		errs = append(errs, packages.Error{
+			Pos:  imp.v.Config.Fset.Position(err.Pos).String(),
+			Msg:  err.Msg,
+			Kind: packages.TypeError,
+		})
+	}
+	pkg.Errors = append(pkg.Errors, errs...)
+}
+
+// We use a counting semaphore to limit
+// the number of parallel I/O calls per process.
+var ioLimit = make(chan bool, 20)
+
+// parseFiles reads and parses the Go source files and returns the ASTs
+// of the ones that could be at least partially parsed, along with a
+// list of I/O and parse errors encountered.
+//
+// Because files are scanned in parallel, the token.Pos
+// positions of the resulting ast.Files are not ordered.
+//
+func (imp *importer) parseFiles(filenames []string) ([]*ast.File, []error) {
+	var wg sync.WaitGroup
+	n := len(filenames)
+	parsed := make([]*ast.File, n)
+	errors := make([]error, n)
+	for i, filename := range filenames {
+		if imp.v.Config.Context.Err() != nil {
+			parsed[i] = nil
+			errors[i] = imp.v.Config.Context.Err()
+			continue
+		}
+
+		// First, check if we have already cached an AST for this file.
+		f := imp.v.files[source.ToURI(filename)]
+		var fAST *ast.File
+		if f != nil {
+			fAST = f.ast
+		}
+
+		wg.Add(1)
+		go func(i int, filename string) {
+			ioLimit <- true // wait
+
+			if fAST != nil {
+				parsed[i], errors[i] = fAST, nil
+			} else {
+				// We don't have a cached AST for this file.
+				var src []byte
+				// Check for an available overlay.
+				for f, contents := range imp.v.Config.Overlay {
+					if sameFile(f, filename) {
+						src = contents
+					}
+				}
+				var err error
+				// We don't have an overlay, so we must read the file's contents.
+				if src == nil {
+					src, err = ioutil.ReadFile(filename)
+				}
+				if err != nil {
+					parsed[i], errors[i] = nil, err
+				} else {
+					// ParseFile may return both an AST and an error.
+					parsed[i], errors[i] = imp.v.Config.ParseFile(imp.v.Config.Fset, filename, src)
+				}
+			}
+
+			<-ioLimit // signal
+			wg.Done()
+		}(i, filename)
+	}
+	wg.Wait()
+
+	// Eliminate nils, preserving order.
+	var o int
+	for _, f := range parsed {
+		if f != nil {
+			parsed[o] = f
+			o++
+		}
+	}
+	parsed = parsed[:o]
+
+	o = 0
+	for _, err := range errors {
+		if err != nil {
+			errors[o] = err
+			o++
+		}
+	}
+	errors = errors[:o]
+
+	return parsed, errors
+}
+
+// sameFile returns true if x and y have the same basename and denote
+// the same file.
+//
+func sameFile(x, y string) bool {
+	if x == y {
+		// It could be the case that y doesn't exist.
+		// For instance, it may be an overlay file that
+		// hasn't been written to disk. To handle that case
+		// let x == y through. (We added the exact absolute path
+		// string to the CompiledGoFiles list, so the unwritten
+		// overlay case implies x==y.)
+		return true
+	}
+	if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
+		if xi, err := os.Stat(x); err == nil {
+			if yi, err := os.Stat(y); err == nil {
+				return os.SameFile(xi, yi)
+			}
+		}
+	}
+	return false
+}
diff --git a/internal/lsp/cache/view.go b/internal/lsp/cache/view.go
index 8c719eaddc1..b868726a0e9 100644
--- a/internal/lsp/cache/view.go
+++ b/internal/lsp/cache/view.go
@@ -6,16 +6,7 @@ package cache
 
 import (
 	"context"
-	"fmt"
-	"go/ast"
-	"go/scanner"
 	"go/token"
-	"go/types"
-	"io/ioutil"
-	"log"
-	"os"
-	"path/filepath"
-	"strings"
 	"sync"
 
 	"golang.org/x/tools/go/packages"
@@ -119,292 +110,3 @@ func (v *View) getFile(uri source.URI) *File {
 	}
 	return f
 }
-
-func (v *View) parse(uri source.URI) error {
-	path, err := uri.Filename()
-	if err != nil {
-		return err
-	}
-	// TODO(rstambler): Enforce here that LoadMode is LoadImports?
-	pkgs, err := packages.Load(&v.Config, fmt.Sprintf("file=%s", path))
-	if len(pkgs) == 0 {
-		if err == nil {
-			err = fmt.Errorf("no packages found for %s", path)
-		}
-		return err
-	}
-	var foundPkg bool // true if we found the package for uri
-	for _, pkg := range pkgs {
-		// TODO(rstambler): Get real TypeSizes from go/packages (golang.org/issues/30139).
-		pkg.TypesSizes = &types.StdSizes{}
-
-		imp := &importer{
-			entries:         make(map[string]*entry),
-			packages:        make(map[string]*packages.Package),
-			v:               v,
-			topLevelPkgPath: pkg.PkgPath,
-		}
-		if err := imp.addImports(pkg.PkgPath, pkg); err != nil {
-			return err
-		}
-		// Start prefetching direct imports.
-		for importPath := range pkg.Imports {
-			go imp.Import(importPath)
-		}
-		imp.importPackage(pkg.PkgPath)
-
-		// Add every file in this package to our cache.
-		for _, file := range pkg.Syntax {
-			// TODO: If a file is in multiple packages, which package do we store?
-			if !file.Pos().IsValid() {
-				log.Printf("invalid position for file %v", file.Name)
-				continue
-			}
-			tok := imp.v.Config.Fset.File(file.Pos())
-			if tok == nil {
-				log.Printf("no token.File for %v", file.Name)
-				continue
-			}
-			fURI := source.ToURI(tok.Name())
-			if fURI == uri {
-				foundPkg = true
-			}
-			f := imp.v.getFile(fURI)
-			f.token = tok
-			f.ast = file
-			f.pkg = pkg
-		}
-	}
-	if !foundPkg {
-		return fmt.Errorf("no package found for %v", uri)
-	}
-	return nil
-}
-
-type importer struct {
-	mu              sync.Mutex
-	entries         map[string]*entry
-	packages        map[string]*packages.Package
-	topLevelPkgPath string
-
-	v *View
-}
-
-type entry struct {
-	pkg   *types.Package
-	err   error
-	ready chan struct{}
-}
-
-func (imp *importer) addImports(path string, pkg *packages.Package) error {
-	if _, ok := imp.packages[path]; ok {
-		return nil
-	}
-	imp.packages[path] = pkg
-	for importPath, importPkg := range pkg.Imports {
-		if err := imp.addImports(importPath, importPkg); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func (imp *importer) Import(path string) (*types.Package, error) {
-	if path == imp.topLevelPkgPath {
-		return nil, fmt.Errorf("import cycle: [%v]", path)
-	}
-	imp.mu.Lock()
-	e, ok := imp.entries[path]
-	if ok {
-		// cache hit
-		imp.mu.Unlock()
-		// wait for entry to become ready
-		<-e.ready
-	} else {
-		// cache miss
-		e = &entry{ready: make(chan struct{})}
-		imp.entries[path] = e
-		imp.mu.Unlock()
-
-		// This goroutine becomes responsible for populating
-		// the entry and broadcasting its readiness.
-		e.pkg, e.err = imp.importPackage(path)
-		close(e.ready)
-	}
-	return e.pkg, e.err
-}
-
-func (imp *importer) importPackage(pkgPath string) (*types.Package, error) {
-	imp.mu.Lock()
-	pkg, ok := imp.packages[pkgPath]
-	imp.mu.Unlock()
-	if !ok {
-		return nil, fmt.Errorf("no metadata for %v", pkgPath)
-	}
-	pkg.Fset = imp.v.Config.Fset
-	appendError := func(err error) {
-		imp.appendPkgError(pkg, err)
-	}
-	files, errs := imp.parseFiles(pkg.CompiledGoFiles)
-	for _, err := range errs {
-		appendError(err)
-	}
-	pkg.Syntax = files
-	cfg := &types.Config{
-		Error:    appendError,
-		Importer: imp,
-	}
-	pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
-	pkg.TypesInfo = &types.Info{
-		Types:      make(map[ast.Expr]types.TypeAndValue),
-		Defs:       make(map[*ast.Ident]types.Object),
-		Uses:       make(map[*ast.Ident]types.Object),
-		Implicits:  make(map[ast.Node]types.Object),
-		Selections: make(map[*ast.SelectorExpr]*types.Selection),
-		Scopes:     make(map[ast.Node]*types.Scope),
-	}
-	check := types.NewChecker(cfg, imp.v.Config.Fset, pkg.Types, pkg.TypesInfo)
-	check.Files(pkg.Syntax)
-
-	return pkg.Types, nil
-}
-
-func (imp *importer) appendPkgError(pkg *packages.Package, err error) {
-	if err == nil {
-		return
-	}
-	var errs []packages.Error
-	switch err := err.(type) {
-	case *scanner.Error:
-		errs = append(errs, packages.Error{
-			Pos:  err.Pos.String(),
-			Msg:  err.Msg,
-			Kind: packages.ParseError,
-		})
-	case scanner.ErrorList:
-		// The first parser error is likely the root cause of the problem.
-		if err.Len() > 0 {
-			errs = append(errs, packages.Error{
-				Pos:  err[0].Pos.String(),
-				Msg:  err[0].Msg,
-				Kind: packages.ParseError,
-			})
-		}
-	case types.Error:
-		errs = append(errs, packages.Error{
-			Pos:  imp.v.Config.Fset.Position(err.Pos).String(),
-			Msg:  err.Msg,
-			Kind: packages.TypeError,
-		})
-	}
-	pkg.Errors = append(pkg.Errors, errs...)
-}
-
-// We use a counting semaphore to limit
-// the number of parallel I/O calls per process.
-var ioLimit = make(chan bool, 20)
-
-// parseFiles reads and parses the Go source files and returns the ASTs
-// of the ones that could be at least partially parsed, along with a
-// list of I/O and parse errors encountered.
-//
-// Because files are scanned in parallel, the token.Pos
-// positions of the resulting ast.Files are not ordered.
-//
-func (imp *importer) parseFiles(filenames []string) ([]*ast.File, []error) {
-	var wg sync.WaitGroup
-	n := len(filenames)
-	parsed := make([]*ast.File, n)
-	errors := make([]error, n)
-	for i, filename := range filenames {
-		if imp.v.Config.Context.Err() != nil {
-			parsed[i] = nil
-			errors[i] = imp.v.Config.Context.Err()
-			continue
-		}
-
-		// First, check if we have already cached an AST for this file.
-		f := imp.v.files[source.ToURI(filename)]
-		var fAST *ast.File
-		if f != nil {
-			fAST = f.ast
-		}
-
-		wg.Add(1)
-		go func(i int, filename string) {
-			ioLimit <- true // wait
-
-			if fAST != nil {
-				parsed[i], errors[i] = fAST, nil
-			} else {
-				// We don't have a cached AST for this file.
-				var src []byte
-				// Check for an available overlay.
-				for f, contents := range imp.v.Config.Overlay {
-					if sameFile(f, filename) {
-						src = contents
-					}
-				}
-				var err error
-				// We don't have an overlay, so we must read the file's contents.
-				if src == nil {
-					src, err = ioutil.ReadFile(filename)
-				}
-				if err != nil {
-					parsed[i], errors[i] = nil, err
-				} else {
-					// ParseFile may return both an AST and an error.
-					parsed[i], errors[i] = imp.v.Config.ParseFile(imp.v.Config.Fset, filename, src)
-				}
-			}
-
-			<-ioLimit // signal
-			wg.Done()
-		}(i, filename)
-	}
-	wg.Wait()
-
-	// Eliminate nils, preserving order.
-	var o int
-	for _, f := range parsed {
-		if f != nil {
-			parsed[o] = f
-			o++
-		}
-	}
-	parsed = parsed[:o]
-
-	o = 0
-	for _, err := range errors {
-		if err != nil {
-			errors[o] = err
-			o++
-		}
-	}
-	errors = errors[:o]
-
-	return parsed, errors
-}
-
-// sameFile returns true if x and y have the same basename and denote
-// the same file.
-//
-func sameFile(x, y string) bool {
-	if x == y {
-		// It could be the case that y doesn't exist.
-		// For instance, it may be an overlay file that
-		// hasn't been written to disk. To handle that case
-		// let x == y through. (We added the exact absolute path
-		// string to the CompiledGoFiles list, so the unwritten
-		// overlay case implies x==y.)
-		return true
-	}
-	if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
-		if xi, err := os.Stat(x); err == nil {
-			if yi, err := os.Stat(y); err == nil {
-				return os.SameFile(xi, yi)
-			}
-		}
-	}
-	return false
-}
diff --git a/internal/lsp/lsp_test.go b/internal/lsp/lsp_test.go
index d0fa18b880b..5d8efeed543 100644
--- a/internal/lsp/lsp_test.go
+++ b/internal/lsp/lsp_test.go
@@ -37,7 +37,7 @@ func testLSP(t *testing.T, exporter packagestest.Exporter) {
 	// We hardcode the expected number of test cases to ensure that all tests
 	// are being executed. If a test is added, this number must be changed.
 	const expectedCompletionsCount = 63
-	const expectedDiagnosticsCount = 17
+	const expectedDiagnosticsCount = 16
 	const expectedFormatCount = 4
 	const expectedDefinitionsCount = 16
 	const expectedTypeDefinitionsCount = 2
diff --git a/internal/lsp/source/diagnostics.go b/internal/lsp/source/diagnostics.go
index 395a248fb88..adb1ded1486 100644
--- a/internal/lsp/source/diagnostics.go
+++ b/internal/lsp/source/diagnostics.go
@@ -61,7 +61,7 @@ func Diagnostics(ctx context.Context, v View, uri URI) (map[string][]Diagnostic,
 	pkg := f.GetPackage()
 	// Prepare the reports we will send for this package.
 	reports := make(map[string][]Diagnostic)
-	for _, filename := range pkg.GoFiles {
+	for _, filename := range pkg.CompiledGoFiles {
 		reports[filename] = []Diagnostic{}
 	}
 	var parseErrors, typeErrors []packages.Error
diff --git a/internal/lsp/testdata/self/self.go.in b/internal/lsp/testdata/self/self.go.in
deleted file mode 100644
index ad0d41764f7..00000000000
--- a/internal/lsp/testdata/self/self.go.in
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build go1.11
-
-package self
-
-import (
-	"golang.org/x/tools/internal/lsp/self" //@diag("\"golang.org/x/tools/internal/lsp/self\"", "LSP", "could not import golang.org/x/tools/internal/lsp/self (import cycle: [golang.org/x/tools/internal/lsp/self])")
-)
-
-func Hello() {}
-
-func _() {
-	self.Hello()
-}
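
Note on the rationale above: with the importer's own cycle check removed, an import
cycle is expected to reach the LSP as a packages.Error of Kind ListError on the loaded
package, which the new (*View).parse loop simply returns. Below is a minimal,
standalone sketch of that behavior, not part of the change; the file path is
hypothetical, and the exact error text comes from the underlying go list driver.

	package main

	import (
		"fmt"
		"log"

		"golang.org/x/tools/go/packages"
	)

	func main() {
		// Load the package containing a (hypothetical) self-importing file.
		cfg := &packages.Config{Mode: packages.LoadImports}
		pkgs, err := packages.Load(cfg, "file=/path/to/self.go")
		if err != nil {
			log.Fatal(err)
		}
		for _, pkg := range pkgs {
			for _, e := range pkg.Errors {
				// Import cycles reported by the driver surface as ListError,
				// the same kind that (*View).parse now returns directly.
				if e.Kind == packages.ListError || e.Kind == packages.UnknownError {
					fmt.Printf("%s: %s\n", e.Pos, e.Msg)
				}
			}
		}
	}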