Mirror of https://github.com/golang/go, synced 2024-11-18 21:05:02 -07:00
d7101b74a4
The early return logic for didOpen events in (*snapshot).invalidateContent was preventing the creation of a new snapshot, which in turn stopped file versions from being updated. This exposed a fundamental issue in the way we were calculating workspace diagnostics: since we weren't waiting for diagnostics to complete for an entire snapshot before replying that the server had been initialized, snapshots were being cloned without any type information.

For quickfix code actions, we assume that we have all information cached (since we must already have sent the diagnostics that the quickfix is mapped to), so we were not finding the cached analysis results. To handle this in the short term, we key analyses by their names and regenerate results as needed for code actions. This is technically more correct than simply assuming that we have the analyses cached. In a follow-up CL, I will make sure that snapshots "wait" on each other to be fully constructed before being cloned.

Change-Id: Ie89fcdb438b6b8b675f87335561bf47b768641ac
Reviewed-on: https://go-review.googlesource.com/c/tools/+/208265
Run-TryBot: Rebecca Stambler <rstambler@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Cottrell <iancottrell@google.com>
Reviewed-by: Heschi Kreinick <heschi@google.com>
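The keyed-cache approach described in the commit message can be illustrated with a minimal sketch. The names here (resultCache, get, regen) are hypothetical and do not come from the CL; the sketch shows only the idea of keying analysis results by analyzer name and regenerating them on demand when a cloned snapshot lacks them:

package cache // hypothetical package, for illustration only

import "sync"

// resultCache keys analysis results by analyzer name, so a code action can
// look up the analysis that produced its diagnostic and recompute the result
// if the snapshot was cloned before the analysis finished.
type resultCache struct {
	mu      sync.Mutex
	results map[string][]string // analyzer name -> diagnostic messages
}

// get returns the cached results for the named analyzer, calling regen to
// rebuild them when no entry exists.
func (c *resultCache) get(name string, regen func() []string) []string {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.results == nil {
		c.results = make(map[string][]string)
	}
	if r, ok := c.results[name]; ok {
		return r
	}
	r := regen()
	c.results[name] = r
	return r
}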
120 lines
3.7 KiB
Go
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lsp

import (
	"context"
	"strings"

	"golang.org/x/tools/internal/lsp/protocol"
	"golang.org/x/tools/internal/lsp/source"
	"golang.org/x/tools/internal/lsp/telemetry"
	"golang.org/x/tools/internal/span"
	"golang.org/x/tools/internal/telemetry/log"
	"golang.org/x/tools/internal/telemetry/trace"
)
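// diagnoseSnapshot computes and publishes diagnostics for every workspace
// package in the snapshot, running each package's pass in its own goroutine.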
func (s *Server) diagnoseSnapshot(ctx context.Context, snapshot source.Snapshot, cphs []source.CheckPackageHandle) {
	for _, cph := range cphs {
		if len(cph.CompiledGoFiles()) == 0 {
			continue
		}
		// Find a file on which to call diagnostics.
		uri := cph.CompiledGoFiles()[0].File().Identity().URI

		// Run diagnostics on the workspace package.
		go func(snapshot source.Snapshot, uri span.URI) {
			if err := s.diagnostics(ctx, snapshot, uri); err != nil {
				log.Error(snapshot.View().BackgroundContext(), "error computing diagnostics", err, telemetry.URI.Of(uri))
			}
		}(snapshot, uri)
	}
}
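// diagnostics computes diagnostics for the file at the given URI, shows any
// analyzer warning message to the user, and publishes the resulting reports,
// tracking undelivered reports so they can be retried on the next pass.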
func (s *Server) diagnostics(ctx context.Context, snapshot source.Snapshot, uri span.URI) error {
	ctx, done := trace.StartSpan(ctx, "lsp:background-worker")
	defer done()

	ctx = telemetry.File.With(ctx, uri)

	f, err := snapshot.View().GetFile(ctx, uri)
	if err != nil {
		return err
	}
	reports, warningMsg, err := source.Diagnostics(ctx, snapshot, f, snapshot.View().Options().DisabledAnalyses)
	if err != nil {
		return err
	}
	if warningMsg != "" {
		s.client.ShowMessage(ctx, &protocol.ShowMessageParams{
			Type:    protocol.Info,
			Message: warningMsg,
		})
	}

	s.undeliveredMu.Lock()
	defer s.undeliveredMu.Unlock()

	for fileID, diagnostics := range reports {
		if err := s.publishDiagnostics(ctx, fileID, diagnostics); err != nil {
			if s.undelivered == nil {
				s.undelivered = make(map[source.FileIdentity][]source.Diagnostic)
			}
			s.undelivered[fileID] = diagnostics

			log.Error(ctx, "failed to deliver diagnostic (will retry)", err, telemetry.File)
			continue
		}
		// In case we had old, undelivered diagnostics.
		delete(s.undelivered, fileID)
	}
	// Anytime we compute diagnostics, make sure to also send along any
	// undelivered ones (only for remaining URIs).
	for fileID, diagnostics := range s.undelivered {
		if err := s.publishDiagnostics(ctx, fileID, diagnostics); err != nil {
			log.Error(ctx, "failed to deliver diagnostic (will not retry)", err, telemetry.File)
		}

		// If we fail to deliver the same diagnostics twice, just give up.
		delete(s.undelivered, fileID)
	}
	return nil
}
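// publishDiagnostics sends the diagnostics for a single file version to the
// editor client.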
func (s *Server) publishDiagnostics(ctx context.Context, fileID source.FileIdentity, diagnostics []source.Diagnostic) error {
	return s.client.PublishDiagnostics(ctx, &protocol.PublishDiagnosticsParams{
		Diagnostics: toProtocolDiagnostics(ctx, diagnostics),
		URI:         protocol.NewURI(fileID.URI),
		Version:     fileID.Version,
	})
}
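// toProtocolDiagnostics converts source diagnostics to LSP protocol
// diagnostics, preserving severity, source, tags, and related information.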
func toProtocolDiagnostics(ctx context.Context, diagnostics []source.Diagnostic) []protocol.Diagnostic {
	reports := []protocol.Diagnostic{}
	for _, diag := range diagnostics {
		related := make([]protocol.DiagnosticRelatedInformation, 0, len(diag.Related))
		for _, rel := range diag.Related {
			related = append(related, protocol.DiagnosticRelatedInformation{
				Location: protocol.Location{
					URI:   protocol.NewURI(rel.URI),
					Range: rel.Range,
				},
				Message: rel.Message,
			})
		}
		reports = append(reports, protocol.Diagnostic{
			Message:            strings.TrimSpace(diag.Message), // go list returns errors prefixed by newline
			Range:              diag.Range,
			Severity:           diag.Severity,
			Source:             diag.Source,
			Tags:               diag.Tags,
			RelatedInformation: related,
		})
	}
	return reports
}