mirror of https://github.com/golang/go synced 2024-09-30 20:18:33 -06:00
go/internal/lsp/general.go
Heschi Kreinick c1903db4db internal/memoize: switch from GC-driven to explicit deletion
The GC-based cache has given us a number of problems. First, memory
leaks driven by reference cycles: the Go runtime cannot collect cycles
involving finalizers, which prevents us from writing natural code in
Bind callbacks. If we screw it up, we get a mysterious leak that takes a
long time to track down. Second, the behavior is generally mysterious;
it's hard to predict how long a value lasts, and harder to tell if a
value being live is a bug. Third, we think that it may be interacting
poorly with the GC, resulting in unnecessary memory usage.

The structure of the values we put in the cache is not actually that
complicated -- there are only 5 significant types: parse, typecheck,
analyze, parse mod, and analyze mod. Managing them manually should not
be conceptually difficult, and in fact we already do most of the work
in (*snapshot).clone.

In this CL the cache adds the concept of "generations", which function
as reference counts on cache entries. Entries are still global and
shared across generations, but will be explicitly deleted once no
generations refer to them. The idea is that each snapshot is a new
generation, and can inherit entries from the previous snapshot or leave
them behind to be deleted.

One obvious risk of this scheme is that we'll leave dangling references
to values without actually inheriting them across generations. To
prevent that, getting a value requires passing in the generation at
which it's being read, and an error will be returned if that generation
is dead.
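
To make the scheme concrete, here is a minimal, self-contained sketch of a
generation-reference-counted cache. It is not the actual internal/memoize API;
Store, Generation, Bind, Inherit, Get, and Destroy below are hypothetical names
chosen for illustration. Each snapshot would own one generation: cloning a
snapshot inherits the entries it still needs into the new generation, destroying
the old generation drops its references, and entries with no remaining
references are deleted immediately. Get only works through a live generation, so
a dangling read through a dead generation is reported as an error instead of
silently keeping the value alive.

package main

import (
	"errors"
	"fmt"
	"sync"
)

// entry is a cached value plus a count of the live generations that refer to it.
type entry struct {
	value interface{}
	refs  int
}

// Store holds all cached entries, shared across generations.
type Store struct {
	mu      sync.Mutex
	entries map[string]*entry
}

// Generation is one snapshot's view of the store.
type Generation struct {
	store *Store
	held  map[string]bool // keys this generation holds a reference to
	dead  bool
}

func NewStore() *Store { return &Store{entries: map[string]*entry{}} }

func (s *Store) NewGeneration() *Generation {
	return &Generation{store: s, held: map[string]bool{}}
}

// Bind caches value under key and takes a reference for this generation.
func (g *Generation) Bind(key string, value interface{}) {
	g.store.mu.Lock()
	defer g.store.mu.Unlock()
	e, ok := g.store.entries[key]
	if !ok {
		e = &entry{value: value}
		g.store.entries[key] = e
	}
	if !g.held[key] {
		g.held[key] = true
		e.refs++
	}
}

// Inherit carries an entry forward from an older generation (as a snapshot clone would).
func (g *Generation) Inherit(from *Generation, key string) {
	g.store.mu.Lock()
	defer g.store.mu.Unlock()
	if from.held[key] && !g.held[key] {
		g.held[key] = true
		g.store.entries[key].refs++
	}
}

// Get reads a value through g; reading through a destroyed generation is an error.
func (g *Generation) Get(key string) (interface{}, error) {
	g.store.mu.Lock()
	defer g.store.mu.Unlock()
	if g.dead {
		return nil, errors.New("use of destroyed generation")
	}
	if !g.held[key] {
		return nil, fmt.Errorf("generation does not hold %q", key)
	}
	return g.store.entries[key].value, nil
}

// Destroy releases g's references; entries that drop to zero references are deleted.
func (g *Generation) Destroy() {
	g.store.mu.Lock()
	defer g.store.mu.Unlock()
	for key := range g.held {
		if e := g.store.entries[key]; e != nil {
			e.refs--
			if e.refs == 0 {
				delete(g.store.entries, key)
			}
		}
	}
	g.held = map[string]bool{}
	g.dead = true
}

func main() {
	store := NewStore()
	g1 := store.NewGeneration()
	g1.Bind("parse:main.go", "AST for main.go")

	g2 := store.NewGeneration() // the next snapshot inherits what it still needs
	g2.Inherit(g1, "parse:main.go")
	g1.Destroy() // the entry survives: g2 still refers to it

	fmt.Println(g2.Get("parse:main.go")) // AST for main.go <nil>
	fmt.Println(g1.Get("parse:main.go")) // <nil> use of destroyed generation
}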

Change-Id: I4b30891efd7be4e10f2b84f4c067b0dee43dcf9c
Reviewed-on: https://go-review.googlesource.com/c/tools/+/242838
Run-TryBot: Heschi Kreinick <heschi@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
Reviewed-by: Robert Findley <rfindley@google.com>
2020-08-10 19:02:17 +00:00

462 lines · 14 KiB · Go

// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lsp

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os"
	"path"
	"sync"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/jsonrpc2"
	"golang.org/x/tools/internal/lsp/debug"
	"golang.org/x/tools/internal/lsp/debug/tag"
	"golang.org/x/tools/internal/lsp/protocol"
	"golang.org/x/tools/internal/lsp/source"
	"golang.org/x/tools/internal/span"
)

func (s *Server) initialize(ctx context.Context, params *protocol.ParamInitialize) (*protocol.InitializeResult, error) {
	s.stateMu.Lock()
	if s.state >= serverInitializing {
		defer s.stateMu.Unlock()
		return nil, fmt.Errorf("%w: initialize called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state)
	}
	s.state = serverInitializing
	s.stateMu.Unlock()

	s.progress.supportsWorkDoneProgress = params.Capabilities.Window.WorkDoneProgress

	options := s.session.Options()
	defer func() { s.session.SetOptions(options) }()

	// TODO: Handle results here.
	source.SetOptions(&options, params.InitializationOptions)
	options.ForClientCapabilities(params.Capabilities)

	// gopls only supports URIs with a file:// scheme. Any other URIs will not
	// work, so fail to initialize. See golang/go#40272.
	if params.RootURI != "" && !params.RootURI.SpanURI().IsFile() {
		return nil, fmt.Errorf("unsupported URI scheme: %v (gopls only supports file URIs)", params.RootURI)
	}
	for _, folder := range params.WorkspaceFolders {
		uri := span.URIFromURI(folder.URI)
		if !uri.IsFile() {
			return nil, fmt.Errorf("unsupported URI scheme: %q (gopls only supports file URIs)", folder.URI)
		}
	}
	s.pendingFolders = params.WorkspaceFolders
	if len(s.pendingFolders) == 0 {
		if params.RootURI != "" {
			s.pendingFolders = []protocol.WorkspaceFolder{{
				URI:  string(params.RootURI),
				Name: path.Base(params.RootURI.SpanURI().Filename()),
			}}
		}
	}

	var codeActionProvider interface{} = true
	if ca := params.Capabilities.TextDocument.CodeAction; len(ca.CodeActionLiteralSupport.CodeActionKind.ValueSet) > 0 {
		// If the client has specified CodeActionLiteralSupport,
		// send the code actions we support.
		//
		// Using CodeActionOptions is only valid if codeActionLiteralSupport is set.
		codeActionProvider = &protocol.CodeActionOptions{
			CodeActionKinds: s.getSupportedCodeActions(),
		}
	}
	var renameOpts interface{} = true
	if r := params.Capabilities.TextDocument.Rename; r.PrepareSupport {
		renameOpts = protocol.RenameOptions{
			PrepareProvider: r.PrepareSupport,
		}
	}

	goplsVer := &bytes.Buffer{}
	debug.PrintVersionInfo(ctx, goplsVer, true, debug.PlainText)

	return &protocol.InitializeResult{
		Capabilities: protocol.ServerCapabilities{
			CallHierarchyProvider: true,
			CodeActionProvider:    codeActionProvider,
			CompletionProvider: protocol.CompletionOptions{
				TriggerCharacters: []string{"."},
			},
			DefinitionProvider:         true,
			TypeDefinitionProvider:     true,
			ImplementationProvider:     true,
			DocumentFormattingProvider: true,
			DocumentSymbolProvider:     true,
			WorkspaceSymbolProvider:    true,
			ExecuteCommandProvider: protocol.ExecuteCommandOptions{
				Commands: options.SupportedCommands,
			},
			FoldingRangeProvider:      true,
			HoverProvider:             true,
			DocumentHighlightProvider: true,
			DocumentLinkProvider:      protocol.DocumentLinkOptions{},
			ReferencesProvider:        true,
			RenameProvider:            renameOpts,
			SignatureHelpProvider: protocol.SignatureHelpOptions{
				TriggerCharacters: []string{"(", ","},
			},
			TextDocumentSync: &protocol.TextDocumentSyncOptions{
				Change:    protocol.Incremental,
				OpenClose: true,
				Save: protocol.SaveOptions{
					IncludeText: false,
				},
			},
			Workspace: protocol.WorkspaceGn{
				WorkspaceFolders: protocol.WorkspaceFoldersGn{
					Supported:           true,
					ChangeNotifications: "workspace/didChangeWorkspaceFolders",
				},
			},
		},
		ServerInfo: struct {
			Name    string `json:"name"`
			Version string `json:"version,omitempty"`
		}{
			Name:    "gopls",
			Version: goplsVer.String(),
		},
	}, nil
}

func (s *Server) initialized(ctx context.Context, params *protocol.InitializedParams) error {
	s.stateMu.Lock()
	if s.state >= serverInitialized {
		defer s.stateMu.Unlock()
		return fmt.Errorf("%w: initialized called while server in %v state", jsonrpc2.ErrInvalidRequest, s.state)
	}
	s.state = serverInitialized
	s.stateMu.Unlock()

	options := s.session.Options()
	defer func() { s.session.SetOptions(options) }()

	var registrations []protocol.Registration
	if options.ConfigurationSupported && options.DynamicConfigurationSupported {
		registrations = append(registrations,
			protocol.Registration{
				ID:     "workspace/didChangeConfiguration",
				Method: "workspace/didChangeConfiguration",
			},
			protocol.Registration{
				ID:     "workspace/didChangeWorkspaceFolders",
				Method: "workspace/didChangeWorkspaceFolders",
			},
		)
	}

	// TODO: this event logging may be unnecessary.
	// The version info is included in the initialize response.
	buf := &bytes.Buffer{}
	debug.PrintVersionInfo(ctx, buf, true, debug.PlainText)
	event.Log(ctx, buf.String())

	if err := s.addFolders(ctx, s.pendingFolders); err != nil {
		return err
	}
	s.pendingFolders = nil

	if len(registrations) > 0 {
		if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
			Registrations: registrations,
		}); err != nil {
			return err
		}
	}
	return nil
}

func (s *Server) addFolders(ctx context.Context, folders []protocol.WorkspaceFolder) error {
	originalViews := len(s.session.Views())
	viewErrors := make(map[span.URI]error)

	var wg sync.WaitGroup
	if s.session.Options().VerboseWorkDoneProgress {
		work := s.progress.start(ctx, DiagnosticWorkTitle(FromInitialWorkspaceLoad), "Calculating diagnostics for initial workspace load...", nil, nil)
		defer func() {
			go func() {
				wg.Wait()
				work.end(ctx, "Done.")
			}()
		}()
	}
	dirsToWatch := map[span.URI]struct{}{}
	for _, folder := range folders {
		uri := span.URIFromURI(folder.URI)
		view, snapshot, release, err := s.addView(ctx, folder.Name, uri)
		if err != nil {
			viewErrors[uri] = err
			continue
		}
		for _, dir := range snapshot.WorkspaceDirectories(ctx) {
			dirsToWatch[dir] = struct{}{}
		}
		// Print each view's environment.
		buf := &bytes.Buffer{}
		if err := view.WriteEnv(ctx, buf); err != nil {
			event.Error(ctx, "failed to write environment", err, tag.Directory.Of(view.Folder().Filename()))
			continue
		}
		event.Log(ctx, buf.String())

		// Diagnose the newly created view.
		wg.Add(1)
		go func() {
			s.diagnoseDetached(snapshot)
			release()
			wg.Done()
		}()
	}
	// Register for file watching notifications, if they are supported.
	s.watchedDirectoriesMu.Lock()
	err := s.registerWatchedDirectoriesLocked(ctx, dirsToWatch)
	s.watchedDirectoriesMu.Unlock()
	if err != nil {
		return err
	}

	if len(viewErrors) > 0 {
		errMsg := fmt.Sprintf("Error loading workspace folders (expected %v, got %v)\n", len(folders), len(s.session.Views())-originalViews)
		for uri, err := range viewErrors {
			errMsg += fmt.Sprintf("failed to load view for %s: %v\n", uri, err)
		}
		return s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
			Type:    protocol.Error,
			Message: errMsg,
		})
	}
	return nil
}

// updateWatchedDirectories compares the current set of directories to watch
// with the previously registered set of directories. If the set of directories
// has changed, we unregister and re-register for file watching notifications.
// updatedSnapshots is the set of snapshots that have been updated.
func (s *Server) updateWatchedDirectories(ctx context.Context, updatedSnapshots []source.Snapshot) error {
	dirsToWatch := map[span.URI]struct{}{}
	seenViews := map[source.View]struct{}{}

	// Collect all of the workspace directories from the updated snapshots.
	for _, snapshot := range updatedSnapshots {
		seenViews[snapshot.View()] = struct{}{}
		for _, dir := range snapshot.WorkspaceDirectories(ctx) {
			dirsToWatch[dir] = struct{}{}
		}
	}
	// Not all views were necessarily updated, so check the remaining views.
	for _, view := range s.session.Views() {
		if _, ok := seenViews[view]; ok {
			continue
		}
		snapshot, release := view.Snapshot(ctx)
		for _, dir := range snapshot.WorkspaceDirectories(ctx) {
			dirsToWatch[dir] = struct{}{}
		}
		release()
	}

	s.watchedDirectoriesMu.Lock()
	defer s.watchedDirectoriesMu.Unlock()

	// Nothing to do if the set of workspace directories is unchanged.
	if equalURISet(s.watchedDirectories, dirsToWatch) {
		return nil
	}

	// If the set of directories to watch has changed, register the updates and
	// unregister the previously watched directories. This ordering avoids a
	// period where no files are being watched. Still, if a user makes on-disk
	// changes before these updates are complete, we may miss them for the new
	// directories.
	if s.watchRegistrationCount > 0 {
		prevID := s.watchRegistrationCount - 1
		if err := s.registerWatchedDirectoriesLocked(ctx, dirsToWatch); err != nil {
			return err
		}
		return s.client.UnregisterCapability(ctx, &protocol.UnregistrationParams{
			Unregisterations: []protocol.Unregistration{{
				ID:     watchedFilesCapabilityID(prevID),
				Method: "workspace/didChangeWatchedFiles",
			}},
		})
	}
	return nil
}

func watchedFilesCapabilityID(id uint64) string {
	return fmt.Sprintf("workspace/didChangeWatchedFiles-%d", id)
}

func equalURISet(m1, m2 map[span.URI]struct{}) bool {
	if len(m1) != len(m2) {
		return false
	}
	for k := range m1 {
		_, ok := m2[k]
		if !ok {
			return false
		}
	}
	return true
}

// registerWatchedDirectoriesLocked sends the workspace/didChangeWatchedFiles
// registrations to the client and updates s.watchedDirectories.
func (s *Server) registerWatchedDirectoriesLocked(ctx context.Context, dirs map[span.URI]struct{}) error {
	if !s.session.Options().DynamicWatchedFilesSupported {
		return nil
	}
	for k := range s.watchedDirectories {
		delete(s.watchedDirectories, k)
	}
	var watchers []protocol.FileSystemWatcher
	for dir := range dirs {
		watchers = append(watchers, protocol.FileSystemWatcher{
			GlobPattern: fmt.Sprintf("%s/**/*.{go,mod,sum}", dir),
			Kind:        float64(protocol.WatchChange + protocol.WatchDelete + protocol.WatchCreate),
		})
	}
	if err := s.client.RegisterCapability(ctx, &protocol.RegistrationParams{
		Registrations: []protocol.Registration{{
			ID:     watchedFilesCapabilityID(s.watchRegistrationCount),
			Method: "workspace/didChangeWatchedFiles",
			RegisterOptions: protocol.DidChangeWatchedFilesRegistrationOptions{
				Watchers: watchers,
			},
		}},
	}); err != nil {
		return err
	}
	s.watchRegistrationCount++

	for dir := range dirs {
		s.watchedDirectories[dir] = struct{}{}
	}
	return nil
}

func (s *Server) fetchConfig(ctx context.Context, name string, folder span.URI, o *source.Options) error {
	if !s.session.Options().ConfigurationSupported {
		return nil
	}
	v := protocol.ParamConfiguration{
		ConfigurationParams: protocol.ConfigurationParams{
			Items: []protocol.ConfigurationItem{{
				ScopeURI: string(folder),
				Section:  "gopls",
			}, {
				ScopeURI: string(folder),
				Section:  fmt.Sprintf("gopls-%s", name),
			}},
		},
	}
	configs, err := s.client.Configuration(ctx, &v)
	if err != nil {
		return fmt.Errorf("failed to get workspace configuration from client (%s): %v", folder, err)
	}
	for _, config := range configs {
		results := source.SetOptions(o, config)
		for _, result := range results {
			if result.Error != nil {
				if err := s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
					Type:    protocol.Error,
					Message: result.Error.Error(),
				}); err != nil {
					return err
				}
			}
			switch result.State {
			case source.OptionUnexpected:
				if err := s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
					Type:    protocol.Error,
					Message: fmt.Sprintf("unexpected config %s", result.Name),
				}); err != nil {
					return err
				}
			case source.OptionDeprecated:
				msg := fmt.Sprintf("config %s is deprecated", result.Name)
				if result.Replacement != "" {
					msg = fmt.Sprintf("%s, use %s instead", msg, result.Replacement)
				}
				if err := s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
					Type:    protocol.Warning,
					Message: msg,
				}); err != nil {
					return err
				}
			}
		}
	}
	return nil
}

// beginFileRequest checks preconditions for a file-oriented request and routes
// it to a snapshot.
// We don't want to return errors for benign conditions like wrong file type,
// so callers should do if !ok { return err } rather than if err != nil.
func (s *Server) beginFileRequest(ctx context.Context, pURI protocol.DocumentURI, expectKind source.FileKind) (source.Snapshot, source.VersionedFileHandle, bool, func(), error) {
	uri := pURI.SpanURI()
	if !uri.IsFile() {
		// Not a file URI. Stop processing the request, but don't return an error.
		return nil, nil, false, func() {}, nil
	}
	view, err := s.session.ViewOf(uri)
	if err != nil {
		return nil, nil, false, func() {}, err
	}
	snapshot, release := view.Snapshot(ctx)
	fh, err := snapshot.GetFile(ctx, uri)
	if err != nil {
		release()
		return nil, nil, false, func() {}, err
	}
	if expectKind != source.UnknownKind && fh.Kind() != expectKind {
		// Wrong kind of file. Nothing to do.
		release()
		return nil, nil, false, func() {}, nil
	}
	return snapshot, fh, true, release, nil
}

func (s *Server) shutdown(ctx context.Context) error {
	s.stateMu.Lock()
	defer s.stateMu.Unlock()
	if s.state < serverInitialized {
		event.Log(ctx, "server shutdown without initialization")
	}
	if s.state != serverShutDown {
		// drop all the active views
		s.session.Shutdown(ctx)
		s.state = serverShutDown
	}
	return nil
}

func (s *Server) exit(ctx context.Context) error {
	s.stateMu.Lock()
	defer s.stateMu.Unlock()

	// TODO: We need a better way to find the conn close method.
	s.client.(io.Closer).Close()

	if s.state != serverShutDown {
		// TODO: We should be able to do better than this.
		os.Exit(1)
	}
	// we don't terminate the process on a normal exit, we just allow it to
	// close naturally if needed after the connection is closed.
	return nil
}