2019-12-17 13:43:36 -07:00
|
|
|
// Copyright 2019 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
2019-09-23 18:06:15 -06:00
|
|
|
package cache
|
|
|
|
|
|
|
|
import (
|
2020-06-11 16:25:17 -06:00
|
|
|
"bytes"
|
2019-09-23 18:06:15 -06:00
|
|
|
"context"
|
2020-01-30 18:52:23 -07:00
|
|
|
"fmt"
|
2020-02-06 15:49:19 -07:00
|
|
|
"go/ast"
|
2020-07-24 15:17:13 -06:00
|
|
|
"go/parser"
|
2020-02-06 15:49:19 -07:00
|
|
|
"go/token"
|
2020-04-22 11:45:24 -06:00
|
|
|
"go/types"
|
2020-06-11 16:25:17 -06:00
|
|
|
"io"
|
2019-10-15 16:07:52 -06:00
|
|
|
"os"
|
2020-01-06 16:08:39 -07:00
|
|
|
"path/filepath"
|
2020-02-14 09:33:11 -07:00
|
|
|
"sort"
|
|
|
|
"strings"
|
2019-09-27 11:17:59 -06:00
|
|
|
"sync"
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-10-15 11:27:09 -06:00
|
|
|
"golang.org/x/tools/go/analysis"
|
2020-02-06 15:49:19 -07:00
|
|
|
"golang.org/x/tools/go/packages"
|
2020-04-17 07:32:56 -06:00
|
|
|
"golang.org/x/tools/internal/event"
|
2020-06-11 16:25:17 -06:00
|
|
|
"golang.org/x/tools/internal/gocommand"
|
2020-03-10 21:09:39 -06:00
|
|
|
"golang.org/x/tools/internal/lsp/debug/tag"
|
2019-09-27 11:17:59 -06:00
|
|
|
"golang.org/x/tools/internal/lsp/source"
|
2020-07-16 15:37:12 -06:00
|
|
|
"golang.org/x/tools/internal/memoize"
|
2020-03-23 06:35:36 -06:00
|
|
|
"golang.org/x/tools/internal/packagesinternal"
|
2019-09-23 18:06:15 -06:00
|
|
|
"golang.org/x/tools/internal/span"
|
2020-06-10 14:05:41 -06:00
|
|
|
"golang.org/x/tools/internal/typesinternal"
|
2020-07-24 15:41:50 -06:00
|
|
|
errors "golang.org/x/xerrors"
|
2019-09-23 18:06:15 -06:00
|
|
|
)
|
|
|
|
|
|
|
|
// snapshot holds the cached per-state data for a view: package metadata,
// file handles, parse/type-check handles, and go.mod-related handles.
// All map fields are guarded by mu.
type snapshot struct {
	memoize.Arg // allow as a memoize.Function arg

	// id identifies this snapshot; exposed via ID().
	id uint64
	// view is the View this snapshot belongs to.
	view *View

	// active tracks outstanding work against this snapshot.
	// NOTE(review): presumably waited on before discarding the snapshot —
	// confirm with the callers of Add/Done/Wait.
	active sync.WaitGroup

	// builtin pins the AST and package for builtin.go in memory.
	builtin *builtinPackageHandle

	// mu guards all of the maps in the snapshot.
	mu sync.Mutex

	// ids maps file URIs to package IDs.
	// It may be invalidated on calls to go/packages.
	ids map[span.URI][]packageID

	// metadata maps package IDs to their associated metadata.
	// It may be invalidated on calls to go/packages.
	metadata map[packageID]*metadata

	// importedBy maps package IDs to the list of packages that import them.
	importedBy map[packageID][]packageID

	// files maps file URIs to their corresponding FileHandles.
	// It may be invalidated when a file's content changes.
	files map[span.URI]source.VersionedFileHandle

	// goFiles maps a parseKey to its parseGoHandle.
	goFiles map[parseKey]*parseGoHandle

	// packages maps a packageKey to a set of packageHandles to which that file belongs.
	// It may be invalidated when a file's content changes.
	packages map[packageKey]*packageHandle

	// actions maps an actionKey to its actionHandle.
	actions map[actionKey]*actionHandle

	// workspacePackages contains the workspace's packages, which are loaded
	// when the view is created.
	workspacePackages map[packageID]packagePath

	// workspaceDirectories are the directories containing workspace packages.
	// They are the view's root, as well as any replace targets.
	workspaceDirectories map[span.URI]struct{}

	// unloadableFiles keeps track of files that we've failed to load.
	unloadableFiles map[span.URI]struct{}

	// parseModHandles keeps track of any ParseModHandles for the snapshot.
	// The handles need not refer to only the view's go.mod file.
	parseModHandles map[span.URI]*parseModHandle

	// Preserve go.mod-related handles to avoid garbage-collecting the results
	// of various calls to the go command.
	//
	// TODO(rstambler): If we end up with any more such handles, we should
	// consider creating a struct for them.
	modTidyHandle    *modTidyHandle
	modWhyHandle     *modWhyHandle
	modUpgradeHandle *modUpgradeHandle
}
|
|
|
|
|
2019-10-15 11:27:09 -06:00
|
|
|
// packageKey uniquely identifies a type-checked package within a snapshot:
// the package ID together with the parse mode it was checked in.
type packageKey struct {
	mode source.ParseMode
	id   packageID
}
|
|
|
|
|
|
|
|
// actionKey uniquely identifies an analysis action: one analyzer applied to
// one (package, parse mode) pair.
type actionKey struct {
	pkg      packageKey
	analyzer *analysis.Analyzer
}
|
|
|
|
|
2020-01-28 22:12:28 -07:00
|
|
|
// ID returns the snapshot's identifier.
func (s *snapshot) ID() uint64 {
	return s.id
}
|
|
|
|
|
2019-10-04 15:18:43 -06:00
|
|
|
// View returns the View to which this snapshot belongs.
func (s *snapshot) View() source.View {
	return s.view
}
|
|
|
|
|
2020-07-28 15:00:10 -06:00
|
|
|
// FileSet returns the shared token.FileSet owned by the session's cache.
func (s *snapshot) FileSet() *token.FileSet {
	return s.view.session.cache.fset
}
|
|
|
|
|
2020-06-10 23:11:52 -06:00
|
|
|
// config returns the configuration used for the snapshot's interaction with the
// go/packages API.
func (s *snapshot) config(ctx context.Context) *packages.Config {
	// Take a consistent view of the options while holding optionsMu.
	s.view.optionsMu.Lock()
	env, buildFlags := s.view.envLocked()
	verboseOutput := s.view.options.VerboseOutput
	s.view.optionsMu.Unlock()

	cfg := &packages.Config{
		Context: ctx,
		Dir:     s.view.root.Filename(),
		// Copy env and build flags so go/packages cannot mutate the view's slices.
		Env:        append([]string{}, env...),
		BuildFlags: append([]string{}, buildFlags...),
		Mode: packages.NeedName |
			packages.NeedFiles |
			packages.NeedCompiledGoFiles |
			packages.NeedImports |
			packages.NeedDeps |
			packages.NeedTypesSizes |
			packages.NeedModule,
		Fset:    s.view.session.cache.fset,
		Overlay: s.buildOverlay(),
		// Parsing is handled by gopls' own caches; go/packages must never parse.
		ParseFile: func(*token.FileSet, string, []byte) (*ast.File, error) {
			panic("go/packages must not be used to parse files")
		},
		Logf: func(format string, args ...interface{}) {
			if verboseOutput {
				event.Log(ctx, fmt.Sprintf(format, args...))
			}
		},
		Tests: true,
	}
	// We want to type check cgo code if go/types supports it.
	if typesinternal.SetUsesCgo(&types.Config{}) {
		cfg.Mode |= packages.LoadMode(packagesinternal.TypecheckCgo)
	}
	// Route go command invocations through the session's shared runner.
	packagesinternal.SetGoCmdRunner(cfg, s.view.session.gocmdRunner)

	return cfg
}
|
|
|
|
|
2020-06-21 21:21:15 -06:00
|
|
|
func (s *snapshot) RunGoCommandDirect(ctx context.Context, verb string, args []string) error {
|
2020-07-24 11:39:58 -06:00
|
|
|
_, runner, inv, cleanup, err := s.goCommandInvocation(ctx, false, verb, args)
|
|
|
|
if err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
defer cleanup()
|
|
|
|
|
|
|
|
_, err = runner.Run(ctx, *inv)
|
2020-06-21 21:21:15 -06:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
2020-06-11 16:25:17 -06:00
|
|
|
func (s *snapshot) RunGoCommand(ctx context.Context, verb string, args []string) (*bytes.Buffer, error) {
|
2020-07-24 11:39:58 -06:00
|
|
|
_, runner, inv, cleanup, err := s.goCommandInvocation(ctx, true, verb, args)
|
2020-06-10 23:11:52 -06:00
|
|
|
if err != nil {
|
2020-07-24 11:39:58 -06:00
|
|
|
return nil, err
|
2020-06-10 23:11:52 -06:00
|
|
|
}
|
|
|
|
defer cleanup()
|
|
|
|
|
2020-07-24 11:39:58 -06:00
|
|
|
return runner.Run(ctx, *inv)
|
2020-06-10 23:11:52 -06:00
|
|
|
}
|
|
|
|
|
2020-07-24 11:39:58 -06:00
|
|
|
func (s *snapshot) RunGoCommandPiped(ctx context.Context, verb string, args []string, stdout, stderr io.Writer) error {
|
|
|
|
_, runner, inv, cleanup, err := s.goCommandInvocation(ctx, true, verb, args)
|
2020-06-10 23:11:52 -06:00
|
|
|
if err != nil {
|
2020-07-24 11:39:58 -06:00
|
|
|
return err
|
2020-06-10 23:11:52 -06:00
|
|
|
}
|
|
|
|
defer cleanup()
|
2020-07-24 11:39:58 -06:00
|
|
|
return runner.RunPiped(ctx, *inv, stdout, stderr)
|
2020-06-10 23:11:52 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
// goCommandInvocation constructs a gocommand.Invocation for the given verb and
// args, along with the runner to execute it and a cleanup function that must
// be called when the caller is done with the invocation. If allowTempModfile
// is set and the view uses a temporary go.mod (-modfile), tmpURI points at the
// temporary file and a -modfile build flag is added.
//
// Assumes that modURI is only provided when the -modfile flag is enabled.
func (s *snapshot) goCommandInvocation(ctx context.Context, allowTempModfile bool, verb string, args []string) (tmpURI span.URI, runner *gocommand.Runner, inv *gocommand.Invocation, cleanup func(), err error) {
	cleanup = func() {} // fallback
	cfg := s.config(ctx)
	if allowTempModfile && s.view.tmpMod {
		modFH, err := s.GetFile(ctx, s.view.modURI)
		if err != nil {
			return "", nil, nil, cleanup, err
		}
		// Use the go.sum if it happens to be available.
		sumFH, _ := s.sumFH(ctx, modFH)

		tmpURI, cleanup, err = tempModFile(modFH, sumFH)
		if err != nil {
			return "", nil, nil, cleanup, err
		}
		// Point the go command at the temporary copy of the go.mod file.
		cfg.BuildFlags = append(cfg.BuildFlags, fmt.Sprintf("-modfile=%s", tmpURI.Filename()))
	}
	runner = packagesinternal.GetGoCmdRunner(cfg)
	return tmpURI, runner, &gocommand.Invocation{
		Verb:       verb,
		Args:       args,
		Env:        cfg.Env,
		BuildFlags: cfg.BuildFlags,
		WorkingDir: cfg.Dir,
	}, cleanup, nil
}
|
|
|
|
|
2020-02-06 15:49:19 -07:00
|
|
|
func (s *snapshot) buildOverlay() map[string][]byte {
|
2020-02-06 14:20:50 -07:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2020-02-06 15:49:19 -07:00
|
|
|
overlays := make(map[string][]byte)
|
|
|
|
for uri, fh := range s.files {
|
|
|
|
overlay, ok := fh.(*overlay)
|
|
|
|
if !ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if overlay.saved {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
// TODO(rstambler): Make sure not to send overlays outside of the current view.
|
|
|
|
overlays[uri.Filename()] = overlay.text
|
|
|
|
}
|
|
|
|
return overlays
|
|
|
|
}
|
|
|
|
|
2020-07-26 16:01:39 -06:00
|
|
|
func hashUnsavedOverlays(files map[span.URI]source.VersionedFileHandle) string {
|
2020-02-14 09:33:11 -07:00
|
|
|
var unsaved []string
|
|
|
|
for uri, fh := range files {
|
|
|
|
if overlay, ok := fh.(*overlay); ok && !overlay.saved {
|
|
|
|
unsaved = append(unsaved, uri.Filename())
|
|
|
|
}
|
|
|
|
}
|
|
|
|
sort.Strings(unsaved)
|
|
|
|
return hashContents([]byte(strings.Join(unsaved, "")))
|
|
|
|
}
|
|
|
|
|
2020-07-22 09:32:32 -06:00
|
|
|
// PackagesForFile returns the type-checked packages (in full parse mode) that
// contain the file at uri, reloading metadata first if none is cached.
func (s *snapshot) PackagesForFile(ctx context.Context, uri span.URI) ([]source.Package, error) {
	ctx = event.Label(ctx, tag.URI.Of(uri))

	// Check if we should reload metadata for the file. We don't invalidate IDs
	// (though we should), so the IDs will be a better source of truth than the
	// metadata. If there are no IDs for the file, then we should also reload.
	ids := s.getIDsForURI(uri)
	reload := len(ids) == 0
	for _, id := range ids {
		// Reload package metadata if any of the metadata has missing
		// dependencies, in case something has changed since the last time we
		// reloaded it.
		if m := s.getMetadata(id); m == nil {
			reload = true
			break
		}
		// TODO(golang/go#36918): Previously, we would reload any package with
		// missing dependencies. This is expensive and results in too many
		// calls to packages.Load. Determine what we should do instead.
	}
	if reload {
		if err := s.load(ctx, fileURI(uri)); err != nil {
			return nil, err
		}
	}
	// Get the list of IDs from the snapshot again, in case it has changed.
	var pkgs []source.Package
	for _, id := range s.getIDsForURI(uri) {
		pkg, err := s.checkedPackage(ctx, id, source.ParseFull)
		if err != nil {
			return nil, err
		}
		pkgs = append(pkgs, pkg)
	}
	return pkgs, nil
}
|
|
|
|
|
2020-07-22 09:32:32 -06:00
|
|
|
// checkedPackage builds (or retrieves) the package handle for id in the given
// parse mode and returns its type-checked package.
func (s *snapshot) checkedPackage(ctx context.Context, id packageID, mode source.ParseMode) (*pkg, error) {
	ph, err := s.buildPackageHandle(ctx, id, mode)
	if err != nil {
		return nil, err
	}
	return ph.check(ctx, s)
}
|
|
|
|
|
2020-07-22 09:32:32 -06:00
|
|
|
func (s *snapshot) GetReverseDependencies(ctx context.Context, id string) ([]source.Package, error) {
|
2020-01-10 15:18:59 -07:00
|
|
|
if err := s.awaitLoaded(ctx); err != nil {
|
2019-12-19 12:31:39 -07:00
|
|
|
return nil, err
|
|
|
|
}
|
2019-11-20 15:57:05 -07:00
|
|
|
ids := make(map[packageID]struct{})
|
|
|
|
s.transitiveReverseDependencies(packageID(id), ids)
|
|
|
|
|
|
|
|
// Make sure to delete the original package ID from the map.
|
|
|
|
delete(ids, packageID(id))
|
|
|
|
|
2020-07-22 09:32:32 -06:00
|
|
|
var pkgs []source.Package
|
2019-11-20 15:57:05 -07:00
|
|
|
for id := range ids {
|
2020-07-22 09:32:32 -06:00
|
|
|
pkg, err := s.checkedPackage(ctx, id, source.ParseFull)
|
2020-01-10 15:18:59 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-07-22 09:32:32 -06:00
|
|
|
pkgs = append(pkgs, pkg)
|
2019-11-20 15:57:05 -07:00
|
|
|
}
|
2020-07-22 09:32:32 -06:00
|
|
|
return pkgs, nil
|
2019-11-20 15:57:05 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// transitiveReverseDependencies populates the uris map with file URIs
|
|
|
|
// belonging to the provided package and its transitive reverse dependencies.
|
|
|
|
func (s *snapshot) transitiveReverseDependencies(id packageID, ids map[packageID]struct{}) {
|
|
|
|
if _, ok := ids[id]; ok {
|
|
|
|
return
|
|
|
|
}
|
2019-12-19 12:31:39 -07:00
|
|
|
if s.getMetadata(id) == nil {
|
2019-11-20 15:57:05 -07:00
|
|
|
return
|
|
|
|
}
|
|
|
|
ids[id] = struct{}{}
|
|
|
|
importedBy := s.getImportedBy(id)
|
|
|
|
for _, parentID := range importedBy {
|
|
|
|
s.transitiveReverseDependencies(parentID, ids)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-07-24 15:17:13 -06:00
|
|
|
// getGoFile returns the cached parseGoHandle for key, or nil if none exists.
func (s *snapshot) getGoFile(key parseKey) *parseGoHandle {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.goFiles[key]
}
|
|
|
|
|
|
|
|
func (s *snapshot) addGoFile(key parseKey, pgh *parseGoHandle) *parseGoHandle {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
if existing, ok := s.goFiles[key]; ok {
|
|
|
|
return existing
|
|
|
|
}
|
|
|
|
s.goFiles[key] = pgh
|
|
|
|
return pgh
|
|
|
|
}
|
|
|
|
|
2020-06-19 17:07:57 -06:00
|
|
|
// getModHandle returns the cached parseModHandle for uri, or nil if none exists.
func (s *snapshot) getModHandle(uri span.URI) *parseModHandle {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.parseModHandles[uri]
}
|
|
|
|
|
|
|
|
// getModWhyHandle returns the snapshot's cached `go mod why` handle, if any.
func (s *snapshot) getModWhyHandle() *modWhyHandle {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.modWhyHandle
}
|
|
|
|
|
|
|
|
// getModUpgradeHandle returns the snapshot's cached module-upgrade handle, if any.
func (s *snapshot) getModUpgradeHandle() *modUpgradeHandle {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.modUpgradeHandle
}
|
|
|
|
|
2020-06-16 20:10:27 -06:00
|
|
|
// getModTidyHandle returns the snapshot's cached `go mod tidy` handle, if any.
func (s *snapshot) getModTidyHandle() *modTidyHandle {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.modTidyHandle
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
// getImportedBy returns the IDs of the packages that directly import id,
// acquiring s.mu for the caller.
func (s *snapshot) getImportedBy(id packageID) []packageID {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.getImportedByLocked(id)
}
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-12-13 15:12:11 -07:00
|
|
|
// getImportedByLocked returns the IDs of the packages that directly import
// id, lazily building the reverse-import graph on first use.
// The caller must hold s.mu.
func (s *snapshot) getImportedByLocked(id packageID) []packageID {
	// If we haven't rebuilt the import graph since creating the snapshot.
	if len(s.importedBy) == 0 {
		s.rebuildImportGraph()
	}
	return s.importedBy[id]
}
|
|
|
|
|
2019-12-19 12:31:39 -07:00
|
|
|
// clearAndRebuildImportGraph discards the reverse-import graph and recomputes
// it from the snapshot's current metadata.
func (s *snapshot) clearAndRebuildImportGraph() {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Completely invalidate the original map.
	s.importedBy = make(map[packageID][]packageID)
	s.rebuildImportGraph()
}
|
|
|
|
|
|
|
|
// rebuildImportGraph recomputes importedBy from the metadata's dependency
// lists. The caller must hold s.mu.
func (s *snapshot) rebuildImportGraph() {
	for id, m := range s.metadata {
		for _, importID := range m.deps {
			s.importedBy[importID] = append(s.importedBy[importID], id)
		}
	}
}
|
|
|
|
|
2020-05-07 12:57:44 -06:00
|
|
|
func (s *snapshot) addPackageHandle(ph *packageHandle) *packageHandle {
|
2019-09-27 11:17:59 -06:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2020-05-07 12:57:44 -06:00
|
|
|
// If the package handle has already been cached,
|
|
|
|
// return the cached handle instead of overriding it.
|
|
|
|
if ph, ok := s.packages[ph.packageKey()]; ok {
|
|
|
|
return ph
|
2019-09-27 11:17:59 -06:00
|
|
|
}
|
2019-11-29 23:17:57 -07:00
|
|
|
s.packages[ph.packageKey()] = ph
|
2020-05-07 12:57:44 -06:00
|
|
|
return ph
|
2019-09-23 18:06:15 -06:00
|
|
|
}
|
|
|
|
|
2020-01-10 15:18:59 -07:00
|
|
|
// workspacePackageIDs returns the IDs of all workspace packages, in no
// particular order (map iteration).
func (s *snapshot) workspacePackageIDs() (ids []packageID) {
	s.mu.Lock()
	defer s.mu.Unlock()

	for id := range s.workspacePackages {
		ids = append(ids, id)
	}
	return ids
}
|
|
|
|
|
2020-07-28 16:18:43 -06:00
|
|
|
func (s *snapshot) WorkspaceDirectories(ctx context.Context) []span.URI {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
var dirs []span.URI
|
|
|
|
for d := range s.workspaceDirectories {
|
|
|
|
dirs = append(dirs, d)
|
|
|
|
}
|
|
|
|
return dirs
|
|
|
|
}
|
|
|
|
|
2020-07-22 09:32:32 -06:00
|
|
|
func (s *snapshot) WorkspacePackages(ctx context.Context) ([]source.Package, error) {
|
2020-01-10 15:18:59 -07:00
|
|
|
if err := s.awaitLoaded(ctx); err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-07-22 09:32:32 -06:00
|
|
|
var pkgs []source.Package
|
2020-01-10 15:18:59 -07:00
|
|
|
for _, pkgID := range s.workspacePackageIDs() {
|
2020-07-22 09:32:32 -06:00
|
|
|
pkg, err := s.checkedPackage(ctx, pkgID, source.ParseFull)
|
2020-01-10 15:18:59 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2020-07-22 09:32:32 -06:00
|
|
|
pkgs = append(pkgs, pkg)
|
2020-01-10 15:18:59 -07:00
|
|
|
}
|
2020-07-22 09:32:32 -06:00
|
|
|
return pkgs, nil
|
2020-01-10 15:18:59 -07:00
|
|
|
}
|
|
|
|
|
2020-07-22 09:32:32 -06:00
|
|
|
// KnownPackages returns type-checked packages for every package with cached
// metadata: workspace packages first (in full parse mode), then all other
// known packages (in exported parse mode).
func (s *snapshot) KnownPackages(ctx context.Context) ([]source.Package, error) {
	if err := s.awaitLoaded(ctx); err != nil {
		return nil, err
	}

	// The WorkspaceSymbols implementation relies on this function returning
	// workspace packages first.
	wsPackages := s.workspacePackageIDs()
	var otherPackages []packageID
	s.mu.Lock()
	for id := range s.metadata {
		if _, ok := s.workspacePackages[id]; ok {
			continue
		}
		otherPackages = append(otherPackages, id)
	}
	s.mu.Unlock()

	var pkgs []source.Package
	for _, id := range wsPackages {
		pkg, err := s.checkedPackage(ctx, id, source.ParseFull)
		if err != nil {
			return nil, err
		}
		pkgs = append(pkgs, pkg)
	}
	// Non-workspace packages are only needed in exported form.
	for _, id := range otherPackages {
		pkg, err := s.checkedPackage(ctx, id, source.ParseExported)
		if err != nil {
			return nil, err
		}
		pkgs = append(pkgs, pkg)
	}
	return pkgs, nil
}
|
|
|
|
|
2020-01-10 15:18:59 -07:00
|
|
|
// CachedImportPaths returns all packages already cached in this snapshot,
// keyed by import path. It does not trigger any new loads or type-checking;
// only results that are already computed are returned.
func (s *snapshot) CachedImportPaths(ctx context.Context) (map[string]source.Package, error) {
	// Don't reload workspace package metadata.
	// This function is meant to only return currently cached information.
	s.view.awaitInitialized(ctx)

	s.mu.Lock()
	defer s.mu.Unlock()

	results := map[string]source.Package{}
	for _, ph := range s.packages {
		// Skip handles whose results have not been computed yet.
		cachedPkg, err := ph.cached()
		if err != nil {
			continue
		}
		for importPath, newPkg := range cachedPkg.imports {
			if oldPkg, ok := results[string(importPath)]; ok {
				// Using the same trick as NarrowestPackage, prefer non-variants.
				if len(newPkg.compiledGoFiles) < len(oldPkg.(*pkg).compiledGoFiles) {
					results[string(importPath)] = newPkg
				}
			} else {
				results[string(importPath)] = newPkg
			}
		}
	}
	return results, nil
}
|
|
|
|
|
2020-02-07 12:38:36 -07:00
|
|
|
func (s *snapshot) getPackage(id packageID, mode source.ParseMode) *packageHandle {
|
2019-10-01 13:21:06 -06:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
key := packageKey{
|
|
|
|
id: id,
|
2020-02-07 12:38:36 -07:00
|
|
|
mode: mode,
|
2019-10-01 13:21:06 -06:00
|
|
|
}
|
|
|
|
return s.packages[key]
|
|
|
|
}
|
|
|
|
|
2019-11-21 16:55:49 -07:00
|
|
|
// getActionHandle returns the cached actionHandle for analyzer a applied to
// package id in parse mode m, or nil if none exists.
func (s *snapshot) getActionHandle(id packageID, m source.ParseMode, a *analysis.Analyzer) *actionHandle {
	s.mu.Lock()
	defer s.mu.Unlock()

	key := actionKey{
		pkg: packageKey{
			id:   id,
			mode: m,
		},
		analyzer: a,
	}
	return s.actions[key]
}
|
|
|
|
|
2020-05-07 12:57:44 -06:00
|
|
|
func (s *snapshot) addActionHandle(ah *actionHandle) *actionHandle {
|
2019-10-14 14:13:06 -06:00
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
key := actionKey{
|
2019-10-15 11:27:09 -06:00
|
|
|
analyzer: ah.analyzer,
|
2019-10-14 14:13:06 -06:00
|
|
|
pkg: packageKey{
|
2020-07-15 15:15:09 -06:00
|
|
|
id: ah.pkg.m.id,
|
2019-10-14 14:13:06 -06:00
|
|
|
mode: ah.pkg.mode,
|
|
|
|
},
|
|
|
|
}
|
2020-05-07 12:57:44 -06:00
|
|
|
if ah, ok := s.actions[key]; ok {
|
|
|
|
return ah
|
2019-10-14 14:13:06 -06:00
|
|
|
}
|
|
|
|
s.actions[key] = ah
|
2020-05-07 12:57:44 -06:00
|
|
|
return ah
|
2019-10-14 14:13:06 -06:00
|
|
|
}
|
|
|
|
|
2020-01-30 19:29:41 -07:00
|
|
|
// getIDsForURI returns the package IDs associated with the file at uri.
func (s *snapshot) getIDsForURI(uri span.URI) []packageID {
	s.mu.Lock()
	defer s.mu.Unlock()

	return s.ids[uri]
}
|
|
|
|
|
|
|
|
// getMetadataForURILocked returns the metadata of every package associated
// with the file at uri that has cached metadata. The caller must hold s.mu.
func (s *snapshot) getMetadataForURILocked(uri span.URI) (metadata []*metadata) {
	// TODO(matloob): uri can be a file or directory. Should we update the mappings
	// to map directories to their contained packages?

	for _, id := range s.ids[uri] {
		if m, ok := s.metadata[id]; ok {
			metadata = append(metadata, m)
		}
	}
	return metadata
}
|
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
// getMetadata returns the cached metadata for id, or nil if none exists.
func (s *snapshot) getMetadata(id packageID) *metadata {
	s.mu.Lock()
	defer s.mu.Unlock()

	return s.metadata[id]
}
|
2019-09-23 18:06:15 -06:00
|
|
|
|
2019-09-27 11:17:59 -06:00
|
|
|
// addID associates the package ID with the file at uri. Duplicate IDs are
// ignored, and a placeholder "command-line-arguments" ID is replaced in place
// by the real ID when one arrives.
func (s *snapshot) addID(uri span.URI, id packageID) {
	s.mu.Lock()
	defer s.mu.Unlock()

	for i, existingID := range s.ids[uri] {
		// TODO: We should make sure not to set duplicate IDs,
		// and instead panic here. This can be done by making sure not to
		// reset metadata information for packages we've already seen.
		if existingID == id {
			return
		}
		// If we are setting a real ID, when the package had only previously
		// had a command-line-arguments ID, we should just replace it.
		if existingID == "command-line-arguments" {
			s.ids[uri][i] = id
			// Delete command-line-arguments if it was a workspace package.
			delete(s.workspacePackages, existingID)
			return
		}
	}
	s.ids[uri] = append(s.ids[uri], id)
}
|
|
|
|
|
2020-01-07 19:37:41 -07:00
|
|
|
// isWorkspacePackage reports whether id is a workspace package, and if so,
// returns its package path.
func (s *snapshot) isWorkspacePackage(id packageID) (packagePath, bool) {
	s.mu.Lock()
	defer s.mu.Unlock()

	scope, ok := s.workspacePackages[id]
	return scope, ok
}
|
2020-07-11 22:26:29 -06:00
|
|
|
|
2020-07-26 16:01:39 -06:00
|
|
|
// FindFile returns the file handle for uri if the snapshot already tracks the
// file; otherwise it returns nil. Unlike GetFile, it never reads the file.
func (s *snapshot) FindFile(uri span.URI) source.VersionedFileHandle {
	f, err := s.view.getFile(uri)
	if err != nil {
		return nil
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	return s.files[f.URI()]
}
|
2019-12-19 12:31:39 -07:00
|
|
|
|
2019-12-17 16:57:54 -07:00
|
|
|
// GetFile returns a File for the given URI. It will always succeed because it
// adds the file to the managed set if needed.
func (s *snapshot) GetFile(ctx context.Context, uri span.URI) (source.VersionedFileHandle, error) {
	f, err := s.view.getFile(uri)
	if err != nil {
		return nil, err
	}

	s.mu.Lock()
	defer s.mu.Unlock()

	// Return the handle we already track for this file, if any.
	if fh, ok := s.files[f.URI()]; ok {
		return fh, nil
	}

	// Read the file through the cache and remember it as a closed (not open
	// in the editor) file.
	fh, err := s.view.session.cache.getFile(ctx, uri)
	if err != nil {
		return nil, err
	}
	closed := &closedFile{fh}
	s.files[f.URI()] = closed
	return closed, nil
}
|
|
|
|
|
2020-02-06 14:20:50 -07:00
|
|
|
func (s *snapshot) IsOpen(uri span.URI) bool {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
_, open := s.files[uri].(*overlay)
|
|
|
|
return open
|
|
|
|
}
|
|
|
|
|
2020-02-14 09:52:17 -07:00
|
|
|
func (s *snapshot) IsSaved(uri span.URI) bool {
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
|
|
|
ovl, open := s.files[uri].(*overlay)
|
|
|
|
return !open || ovl.saved
|
|
|
|
}
|
|
|
|
|
2020-01-10 15:18:59 -07:00
|
|
|
func (s *snapshot) awaitLoaded(ctx context.Context) error {
|
|
|
|
// Do not return results until the snapshot's view has been initialized.
|
2020-01-27 18:25:48 -07:00
|
|
|
s.view.awaitInitialized(ctx)
|
|
|
|
|
2020-01-28 22:08:22 -07:00
|
|
|
if err := s.reloadWorkspace(ctx); err != nil {
|
2020-01-25 14:41:35 -07:00
|
|
|
return err
|
|
|
|
}
|
2020-05-28 19:21:29 -06:00
|
|
|
if err := s.reloadOrphanedFiles(ctx); err != nil {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// If we still have absolutely no metadata, check if the view failed to
|
|
|
|
// initialize and return any errors.
|
|
|
|
// TODO(rstambler): Should we clear the error after we return it?
|
|
|
|
s.mu.Lock()
|
|
|
|
defer s.mu.Unlock()
|
|
|
|
if len(s.metadata) == 0 {
|
|
|
|
return s.view.initializedErr
|
|
|
|
}
|
|
|
|
return nil
|
2020-01-10 15:18:59 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// reloadWorkspace reloads the metadata for all invalidated workspace packages.
|
2020-01-28 22:08:22 -07:00
|
|
|
func (s *snapshot) reloadWorkspace(ctx context.Context) error {
|
2020-01-25 14:41:35 -07:00
|
|
|
// If the view's build configuration is invalid, we cannot reload by package path.
|
|
|
|
// Just reload the directory instead.
|
|
|
|
if !s.view.hasValidBuildConfiguration {
|
2020-01-30 18:44:00 -07:00
|
|
|
return s.load(ctx, viewLoadScope("LOAD_INVALID_VIEW"))
|
2020-01-25 14:41:35 -07:00
|
|
|
}
|
|
|
|
|
2020-01-23 17:24:51 -07:00
|
|
|
// See which of the workspace packages are missing metadata.
|
|
|
|
s.mu.Lock()
|
|
|
|
var pkgPaths []interface{}
|
|
|
|
for id, pkgPath := range s.workspacePackages {
|
2020-03-23 18:16:00 -06:00
|
|
|
// Don't try to reload "command-line-arguments" directly.
|
|
|
|
if pkgPath == "command-line-arguments" {
|
|
|
|
continue
|
|
|
|
}
|
2020-01-23 17:24:51 -07:00
|
|
|
if s.metadata[id] == nil {
|
|
|
|
pkgPaths = append(pkgPaths, pkgPath)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
s.mu.Unlock()
|
|
|
|
|
2020-01-25 14:41:35 -07:00
|
|
|
if len(pkgPaths) == 0 {
|
2020-01-28 22:08:22 -07:00
|
|
|
return nil
|
2020-01-23 17:24:51 -07:00
|
|
|
}
|
2020-01-30 18:44:00 -07:00
|
|
|
return s.load(ctx, pkgPaths...)
|
2020-01-25 14:41:35 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
func (s *snapshot) reloadOrphanedFiles(ctx context.Context) error {
|
2020-01-23 17:24:51 -07:00
|
|
|
// When we load ./... or a package path directly, we may not get packages
|
|
|
|
// that exist only in overlays. As a workaround, we search all of the files
|
|
|
|
// available in the snapshot and reload their metadata individually using a
|
|
|
|
// file= query if the metadata is unavailable.
|
2020-01-25 14:41:35 -07:00
|
|
|
scopes := s.orphanedFileScopes()
|
|
|
|
if len(scopes) == 0 {
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2020-01-30 18:44:00 -07:00
|
|
|
err := s.load(ctx, scopes...)
|
2020-01-25 14:41:35 -07:00
|
|
|
|
|
|
|
// If we failed to load some files, i.e. they have no metadata,
|
|
|
|
// mark the failures so we don't bother retrying until the file's
|
|
|
|
// content changes.
|
|
|
|
//
|
|
|
|
// TODO(rstambler): This may be an overestimate if the load stopped
|
|
|
|
// early for an unrelated errors. Add a fallback?
|
|
|
|
//
|
|
|
|
// Check for context cancellation so that we don't incorrectly mark files
|
|
|
|
// as unloadable, but don't return before setting all workspace packages.
|
|
|
|
if ctx.Err() == nil && err != nil {
|
2020-03-24 16:29:27 -06:00
|
|
|
event.Error(ctx, "reloadOrphanedFiles: failed to load", err, tag.Query.Of(scopes))
|
2020-01-25 14:41:35 -07:00
|
|
|
s.mu.Lock()
|
|
|
|
for _, scope := range scopes {
|
|
|
|
uri := span.URI(scope.(fileURI))
|
|
|
|
if s.getMetadataForURILocked(uri) == nil {
|
|
|
|
s.unloadableFiles[uri] = struct{}{}
|
2020-01-23 17:24:51 -07:00
|
|
|
}
|
|
|
|
}
|
2020-01-25 14:41:35 -07:00
|
|
|
s.mu.Unlock()
|
|
|
|
}
|
2020-01-23 17:24:51 -07:00
|
|
|
return nil
|
2020-01-11 21:59:57 -07:00
|
|
|
}
|
|
|
|
|
2020-01-23 17:24:51 -07:00
|
|
|
func (s *snapshot) orphanedFileScopes() []interface{} {
|
2020-01-10 15:18:59 -07:00
|
|
|
s.mu.Lock()
|
2020-01-11 21:59:57 -07:00
|
|
|
defer s.mu.Unlock()
|
|
|
|
|
2020-01-23 17:24:51 -07:00
|
|
|
scopeSet := make(map[span.URI]struct{})
|
|
|
|
for uri, fh := range s.files {
|
|
|
|
// Don't try to reload metadata for go.mod files.
|
internal/lsp: read files eagerly
We use file identities pervasively throughout gopls. Prior to this
change, the identity is the modification date of an unopened file, or
the hash of an opened file. That means that opening a file changes its
identity, which causes unnecessary churn in the cache.
Unfortunately, there isn't an easy way to fix this. Changing the
cache key to something else, such as the modification time, means that
we won't unify cache entries if a change is made and then undone. The
approach here is to read files eagerly in GetFile, so that we know their
hashes immediately. That resolves the churn, but means that we do a ton
of file IO at startup.
Incidental changes:
Remove the FileSystem interface; there was only one implementation and
it added a fair amount of cruft. We have many other places that assume
os.Stat and such work.
Add direct accessors to FileHandle for URI, Kind, and Version. Most uses
of (FileHandle).Identity were for stuff that we derive solely from the
URI, and this helped me disentangle them. It is a *ton* of churn,
though. I can revert it if you want.
Change-Id: Ia2133bc527f71daf81c9d674951726a232ca5bc9
Reviewed-on: https://go-review.googlesource.com/c/tools/+/237037
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
2020-06-08 13:21:24 -06:00
|
|
|
if fh.Kind() != source.Go {
|
2020-01-23 17:24:51 -07:00
|
|
|
continue
|
|
|
|
}
|
2020-01-27 14:39:28 -07:00
|
|
|
// If the URI doesn't belong to this view, then it's not in a workspace
|
|
|
|
// package and should not be reloaded directly.
|
|
|
|
if !contains(s.view.session.viewsOf(uri), s.view) {
|
|
|
|
continue
|
|
|
|
}
|
2020-01-23 17:24:51 -07:00
|
|
|
// Don't reload metadata for files we've already deemed unloadable.
|
|
|
|
if _, ok := s.unloadableFiles[uri]; ok {
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
if s.getMetadataForURILocked(uri) == nil {
|
|
|
|
scopeSet[uri] = struct{}{}
|
2020-01-10 15:18:59 -07:00
|
|
|
}
|
|
|
|
}
|
2020-01-23 17:24:51 -07:00
|
|
|
var scopes []interface{}
|
|
|
|
for uri := range scopeSet {
|
|
|
|
scopes = append(scopes, fileURI(uri))
|
2020-01-10 15:18:59 -07:00
|
|
|
}
|
2020-01-23 17:24:51 -07:00
|
|
|
return scopes
|
|
|
|
}
|
|
|
|
|
2020-06-02 08:57:20 -06:00
|
|
|
func contains(views []*View, view *View) bool {
|
2020-01-23 17:24:51 -07:00
|
|
|
for _, v := range views {
|
|
|
|
if v == view {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
|
2020-07-26 16:01:39 -06:00
|
|
|
// clone creates the successor snapshot: it copies s, invalidating any cached
// state (parses, type information, analysis, and possibly metadata) that
// depends on the changed files in withoutURIs. If forceReloadMetadata is set,
// metadata is invalidated for every changed file regardless of what changed.
// The returned snapshot is independent of s and safe to use after s changes.
func (s *snapshot) clone(ctx context.Context, withoutURIs map[span.URI]source.VersionedFileHandle, forceReloadMetadata bool) *snapshot {
	s.mu.Lock()
	defer s.mu.Unlock()

	// Start with fresh maps; the sections below selectively copy entries
	// from s that are still valid.
	result := &snapshot{
		id:                   s.id + 1,
		view:                 s.view,
		builtin:              s.builtin,
		ids:                  make(map[span.URI][]packageID),
		importedBy:           make(map[packageID][]packageID),
		metadata:             make(map[packageID]*metadata),
		packages:             make(map[packageKey]*packageHandle),
		actions:              make(map[actionKey]*actionHandle),
		files:                make(map[span.URI]source.VersionedFileHandle),
		goFiles:              make(map[parseKey]*parseGoHandle),
		workspaceDirectories: make(map[span.URI]struct{}),
		workspacePackages:    make(map[packageID]packagePath),
		unloadableFiles:      make(map[span.URI]struct{}),
		parseModHandles:      make(map[span.URI]*parseModHandle),
		modTidyHandle:        s.modTidyHandle,
		modUpgradeHandle:     s.modUpgradeHandle,
		modWhyHandle:         s.modWhyHandle,
	}

	// Copy all of the FileHandles.
	for k, v := range s.files {
		result.files[k] = v
	}
	// Copy the set of unloadable files.
	for k, v := range s.unloadableFiles {
		result.unloadableFiles[k] = v
	}
	// Copy all of the modHandles.
	for k, v := range s.parseModHandles {
		result.parseModHandles[k] = v
	}
	// Copy all of the workspace directories. They may be reset later.
	for k, v := range s.workspaceDirectories {
		result.workspaceDirectories[k] = v
	}

	// Copy parsed Go file handles, dropping any for changed files.
	for k, v := range s.goFiles {
		if _, ok := withoutURIs[k.file.URI]; ok {
			continue
		}
		result.goFiles[k] = v
	}

	// transitiveIDs keeps track of transitive reverse dependencies.
	// If an ID is present in the map, invalidate its types.
	// If an ID's value is true, invalidate its metadata too.
	transitiveIDs := make(map[packageID]bool)
	for withoutURI, currentFH := range withoutURIs {
		directIDs := map[packageID]struct{}{}

		// Collect all of the package IDs that correspond to the given file.
		// TODO: if the file has moved into a new package, we should invalidate that too.
		for _, id := range s.ids[withoutURI] {
			directIDs[id] = struct{}{}
		}
		// The original FileHandle for this URI is cached on the snapshot.
		originalFH := s.files[withoutURI]

		// Check if the file's package name or imports have changed,
		// and if so, invalidate this file's packages' metadata.
		invalidateMetadata := forceReloadMetadata || s.shouldInvalidateMetadata(ctx, originalFH, currentFH)

		// Invalidate the previous modTidyHandle if any of the files have been
		// saved or if any of the metadata has been invalidated.
		if invalidateMetadata || fileWasSaved(originalFH, currentFH) {
			result.modTidyHandle = nil
			result.modUpgradeHandle = nil
			result.modWhyHandle = nil
		}
		if currentFH.Kind() == source.Mod {
			// If the view's go.mod file's contents have changed, invalidate the
			// metadata for every known package in the snapshot.
			if invalidateMetadata {
				for k := range s.packages {
					directIDs[k.id] = struct{}{}
				}
			}

			delete(result.parseModHandles, withoutURI)

			if currentFH.URI() == s.view.modURI {
				// The go.mod's replace directives may have changed. We may
				// need to update our set of workspace directories. Use the new
				// snapshot, as it can be locked without causing issues.
				result.workspaceDirectories = result.findWorkspaceDirectories(ctx, currentFH)
			}
		}

		// If this is a file we don't yet know about,
		// then we do not yet know what packages it should belong to.
		// Make a rough estimate of what metadata to invalidate by finding the package IDs
		// of all of the files in the same directory as this one.
		// TODO(rstambler): Speed this up by mapping directories to filenames.
		if len(directIDs) == 0 {
			if dirStat, err := os.Stat(filepath.Dir(withoutURI.Filename())); err == nil {
				for uri := range s.files {
					if fdirStat, err := os.Stat(filepath.Dir(uri.Filename())); err == nil {
						if os.SameFile(dirStat, fdirStat) {
							for _, id := range s.ids[uri] {
								directIDs[id] = struct{}{}
							}
						}
					}
				}
			}
		}

		// Invalidate reverse dependencies too.
		// TODO(heschi): figure out the locking model and use transitiveReverseDeps?
		var addRevDeps func(packageID)
		addRevDeps = func(id packageID) {
			current, seen := transitiveIDs[id]
			newInvalidateMetadata := current || invalidateMetadata

			// If we've already seen this ID, and the value of invalidate
			// metadata has not changed, we can return early.
			if seen && current == newInvalidateMetadata {
				return
			}
			transitiveIDs[id] = newInvalidateMetadata
			for _, rid := range s.getImportedByLocked(id) {
				addRevDeps(rid)
			}
		}
		for id := range directIDs {
			addRevDeps(id)
		}

		// Handle the invalidated file; it may have new contents or not exist.
		if _, err := currentFH.Read(); os.IsNotExist(err) {
			delete(result.files, withoutURI)
		} else {
			result.files[withoutURI] = currentFH
		}
		// Make sure to remove the changed file from the unloadable set.
		delete(result.unloadableFiles, withoutURI)
	}
	// Copy the package type information, skipping invalidated packages.
	for k, v := range s.packages {
		if _, ok := transitiveIDs[k.id]; ok {
			continue
		}
		result.packages[k] = v
	}
	// Copy the package analysis information, skipping invalidated packages.
	for k, v := range s.actions {
		if _, ok := transitiveIDs[k.pkg.id]; ok {
			continue
		}
		result.actions[k] = v
	}
	// Copy the package metadata. We only need to invalidate packages directly
	// containing the affected file, and only if it changed in a relevant way.
	for k, v := range s.metadata {
		if invalidateMetadata, ok := transitiveIDs[k]; invalidateMetadata && ok {
			continue
		}
		result.metadata[k] = v
	}
	// Copy the URI to package ID mappings, skipping only those URIs whose
	// metadata will be reloaded in future calls to load.
copyIDs:
	for k, ids := range s.ids {
		for _, id := range ids {
			if invalidateMetadata, ok := transitiveIDs[id]; invalidateMetadata && ok {
				continue copyIDs
			}
		}
		result.ids[k] = ids
	}
	// Copy the set of initially loaded packages.
	for id, pkgPath := range s.workspacePackages {
		if id == "command-line-arguments" {
			if invalidateMetadata, ok := transitiveIDs[id]; invalidateMetadata && ok {
				continue
			}
		}

		// If all the files we know about in a package have been deleted,
		// the package is gone and we should no longer try to load it.
		if m := s.metadata[id]; m != nil {
			hasFiles := false
			for _, uri := range s.metadata[id].goFiles {
				if _, ok := result.files[uri]; ok {
					hasFiles = true
					break
				}
			}
			if !hasFiles {
				continue
			}
		}

		result.workspacePackages[id] = pkgPath
	}
	// Don't bother copying the importedBy graph,
	// as it changes each time we update metadata.
	return result
}
|
|
|
|
|
2020-03-05 10:43:50 -07:00
|
|
|
// fileWasSaved reports whether the FileHandle passed in has been saved. It
|
|
|
|
// accomplishes this by checking to see if the original and current FileHandles
|
|
|
|
// are both overlays, and if the current FileHandle is saved while the original
|
|
|
|
// FileHandle was not saved.
|
2020-02-14 09:33:11 -07:00
|
|
|
func fileWasSaved(originalFH, currentFH source.FileHandle) bool {
|
|
|
|
c, ok := currentFH.(*overlay)
|
2020-03-05 10:43:50 -07:00
|
|
|
if !ok || c == nil {
|
2020-02-14 09:33:11 -07:00
|
|
|
return true
|
|
|
|
}
|
2020-03-05 10:43:50 -07:00
|
|
|
o, ok := originalFH.(*overlay)
|
|
|
|
if !ok || o == nil {
|
2020-02-14 09:33:11 -07:00
|
|
|
return c.saved
|
|
|
|
}
|
2020-03-05 10:43:50 -07:00
|
|
|
return !o.saved && c.saved
|
2020-02-14 09:33:11 -07:00
|
|
|
}
|
|
|
|
|
2020-01-30 19:29:41 -07:00
|
|
|
// shouldInvalidateMetadata reparses a file's package and import declarations to
|
2020-01-28 22:12:28 -07:00
|
|
|
// determine if the file requires a metadata reload.
|
2020-01-30 19:29:41 -07:00
|
|
|
func (s *snapshot) shouldInvalidateMetadata(ctx context.Context, originalFH, currentFH source.FileHandle) bool {
|
2020-01-28 22:12:28 -07:00
|
|
|
if originalFH == nil {
|
internal/lsp: read files eagerly
We use file identities pervasively throughout gopls. Prior to this
change, the identity is the modification date of an unopened file, or
the hash of an opened file. That means that opening a file changes its
identity, which causes unnecessary churn in the cache.
Unfortunately, there isn't an easy way to fix this. Changing the
cache key to something else, such as the modification time, means that
we won't unify cache entries if a change is made and then undone. The
approach here is to read files eagerly in GetFile, so that we know their
hashes immediately. That resolves the churn, but means that we do a ton
of file IO at startup.
Incidental changes:
Remove the FileSystem interface; there was only one implementation and
it added a fair amount of cruft. We have many other places that assume
os.Stat and such work.
Add direct accessors to FileHandle for URI, Kind, and Version. Most uses
of (FileHandle).Identity were for stuff that we derive solely from the
URI, and this helped me disentangle them. It is a *ton* of churn,
though. I can revert it if you want.
Change-Id: Ia2133bc527f71daf81c9d674951726a232ca5bc9
Reviewed-on: https://go-review.googlesource.com/c/tools/+/237037
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
2020-06-08 13:21:24 -06:00
|
|
|
return currentFH.Kind() == source.Go
|
2020-01-28 22:12:28 -07:00
|
|
|
}
|
|
|
|
// If the file hasn't changed, there's no need to reload.
|
2020-07-26 16:01:39 -06:00
|
|
|
if originalFH.FileIdentity() == currentFH.FileIdentity() {
|
2020-01-28 22:12:28 -07:00
|
|
|
return false
|
|
|
|
}
|
|
|
|
// If a go.mod file's contents have changed, always invalidate metadata.
|
internal/lsp: read files eagerly
We use file identities pervasively throughout gopls. Prior to this
change, the identity is the modification date of an unopened file, or
the hash of an opened file. That means that opening a file changes its
identity, which causes unnecessary churn in the cache.
Unfortunately, there isn't an easy way to fix this. Changing the
cache key to something else, such as the modification time, means that
we won't unify cache entries if a change is made and then undone. The
approach here is to read files eagerly in GetFile, so that we know their
hashes immediately. That resolves the churn, but means that we do a ton
of file IO at startup.
Incidental changes:
Remove the FileSystem interface; there was only one implementation and
it added a fair amount of cruft. We have many other places that assume
os.Stat and such work.
Add direct accessors to FileHandle for URI, Kind, and Version. Most uses
of (FileHandle).Identity were for stuff that we derive solely from the
URI, and this helped me disentangle them. It is a *ton* of churn,
though. I can revert it if you want.
Change-Id: Ia2133bc527f71daf81c9d674951726a232ca5bc9
Reviewed-on: https://go-review.googlesource.com/c/tools/+/237037
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
2020-06-08 13:21:24 -06:00
|
|
|
if kind := originalFH.Kind(); kind == source.Mod {
|
2020-06-10 23:11:52 -06:00
|
|
|
return originalFH.URI() == s.view.modURI
|
2020-01-28 22:12:28 -07:00
|
|
|
}
|
|
|
|
// Get the original and current parsed files in order to check package name and imports.
|
2020-07-24 15:17:13 -06:00
|
|
|
// Use the direct parsing API to avoid modifying the snapshot we're cloning.
|
|
|
|
parse := func(fh source.FileHandle) (*ast.File, error) {
|
|
|
|
data, err := fh.Read()
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
fset := token.NewFileSet()
|
|
|
|
return parser.ParseFile(fset, fh.URI().Filename(), data, parser.ImportsOnly)
|
|
|
|
}
|
|
|
|
original, originalErr := parse(originalFH)
|
|
|
|
current, currentErr := parse(currentFH)
|
2020-01-28 22:12:28 -07:00
|
|
|
if originalErr != nil || currentErr != nil {
|
|
|
|
return (originalErr == nil) != (currentErr == nil)
|
|
|
|
}
|
|
|
|
// Check if the package's metadata has changed. The cases handled are:
|
|
|
|
// 1. A package's name has changed
|
|
|
|
// 2. A file's imports have changed
|
2020-07-24 15:17:13 -06:00
|
|
|
if original.Name.Name != current.Name.Name {
|
2020-01-28 22:12:28 -07:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
// If the package's imports have increased, definitely re-run `go list`.
|
2020-07-24 15:17:13 -06:00
|
|
|
if len(original.Imports) < len(current.Imports) {
|
2020-01-28 22:12:28 -07:00
|
|
|
return true
|
|
|
|
}
|
|
|
|
importSet := make(map[string]struct{})
|
2020-07-24 15:17:13 -06:00
|
|
|
for _, importSpec := range original.Imports {
|
2020-01-28 22:12:28 -07:00
|
|
|
importSet[importSpec.Path.Value] = struct{}{}
|
|
|
|
}
|
|
|
|
// If any of the current imports were not in the original imports.
|
2020-07-24 15:17:13 -06:00
|
|
|
for _, importSpec := range current.Imports {
|
2020-01-28 22:12:28 -07:00
|
|
|
if _, ok := importSet[importSpec.Path.Value]; !ok {
|
|
|
|
return true
|
|
|
|
}
|
|
|
|
}
|
|
|
|
return false
|
2019-11-15 12:47:29 -07:00
|
|
|
}
|
2020-07-24 15:41:50 -06:00
|
|
|
|
2020-07-28 16:18:43 -06:00
|
|
|
// findWorkspaceDirectories returns all of the directories that are
// considered to be part of the view's workspace. For GOPATH workspaces, this
// is just the view's root. For modules-based workspaces, this is the module
// root and any local (on-disk) replace targets.
//
// It assumes that the file handle is the view's go.mod file, if it has one.
// The caller need not be holding the snapshot's mutex, but it might be.
func (s *snapshot) findWorkspaceDirectories(ctx context.Context, modFH source.FileHandle) map[span.URI]struct{} {
	m := map[span.URI]struct{}{
		s.view.root: {},
	}
	// If the view does not have a go.mod file, only the root directory
	// is known. In GOPATH mode, we should really watch the entire GOPATH,
	// but that's too expensive.
	modURI := s.view.modURI
	if modURI == "" {
		return m
	}
	// Without a file handle for the go.mod, replace targets cannot be read.
	if modFH == nil {
		return m
	}
	// Ignore parse errors. An invalid go.mod is not fatal.
	mod, err := s.ParseMod(ctx, modFH)
	if err != nil {
		return m
	}
	for _, r := range mod.File.Replace {
		// We may be replacing a module with a different version, not a path
		// on disk.
		if r.New.Version != "" {
			continue
		}
		uri := span.URIFromPath(r.New.Path)
		m[uri] = struct{}{}
	}
	return m
}
|
|
|
|
|
2020-07-24 15:41:50 -06:00
|
|
|
func (s *snapshot) BuiltinPackage(ctx context.Context) (*source.BuiltinPackage, error) {
|
|
|
|
s.view.awaitInitialized(ctx)
|
|
|
|
|
|
|
|
if s.builtin == nil {
|
|
|
|
return nil, errors.Errorf("no builtin package for view %s", s.view.name)
|
|
|
|
}
|
|
|
|
d, err := s.builtin.handle.Get(ctx, s)
|
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
|
|
|
data := d.(*builtinPackageData)
|
|
|
|
return data.parsed, data.err
|
|
|
|
}
|
|
|
|
|
|
|
|
// buildBuiltinPackage creates (but does not evaluate) a memoized handle that
// parses the single builtin source file in goFiles and stores it on the
// snapshot as s.builtin. The work itself runs lazily when the handle is
// first retrieved via BuiltinPackage.
func (s *snapshot) buildBuiltinPackage(ctx context.Context, goFiles []string) error {
	if len(goFiles) != 1 {
		return errors.Errorf("only expected 1 file, got %v", len(goFiles))
	}
	uri := span.URIFromPath(goFiles[0])

	// Get the FileHandle through the cache to avoid adding it to the snapshot
	// and to get the file content from disk.
	fh, err := s.view.session.cache.getFile(ctx, uri)
	if err != nil {
		return err
	}
	// Key the handle by file identity so edits to builtin.go produce a new
	// cache entry. fh is captured by the closure; the snapshot is supplied
	// as the memoize argument when the handle is evaluated.
	h := s.view.session.cache.store.Bind(fh.FileIdentity(), func(ctx context.Context, arg memoize.Arg) interface{} {
		snapshot := arg.(*snapshot)

		pgh := snapshot.parseGoHandle(ctx, fh, source.ParseFull)
		pgf, _, err := snapshot.parseGo(ctx, pgh)
		if err != nil {
			return &builtinPackageData{err: err}
		}
		pkg, err := ast.NewPackage(snapshot.view.session.cache.fset, map[string]*ast.File{
			pgf.URI.Filename(): pgf.File,
		}, nil, nil)
		if err != nil {
			return &builtinPackageData{err: err}
		}
		return &builtinPackageData{
			parsed: &source.BuiltinPackage{
				ParsedFile: pgf,
				Package:    pkg,
			},
		}
	})
	s.builtin = &builtinPackageHandle{handle: h}
	return nil
}
|