go/internal/lsp/lsprpc/lsprpc_test.go
Rob Findley cb8d9cd245 internal/lsp: support configurable codeLens
Some code lenses may be undesirable for certain users or editors -- for
example, a code lens that runs tests, when VSCode already supports that
functionality outside of the LSP. To handle such situations, support
configuring code lenses via a new 'codelens' gopls option.
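
As a rough illustration of the gating this option implies (all names here
are hypothetical stand-ins, not the actual gopls wiring, which lives in
source.Options):

	package main

	import "fmt"

	// lensOptions is a hypothetical stand-in for the new 'codelens'
	// option: it maps a lens identifier (e.g. "test") to whether that
	// lens is enabled.
	type lensOptions struct {
		Codelens map[string]bool
	}

	// enabled reports whether a lens should be computed; lenses default
	// to on unless explicitly configured off.
	func (o lensOptions) enabled(name string) bool {
		if v, ok := o.Codelens[name]; ok {
			return v
		}
		return true
	}

	func main() {
		opts := lensOptions{Codelens: map[string]bool{"test": false}}
		fmt.Println(opts.enabled("test"))     // false: disabled by the user
		fmt.Println(opts.enabled("generate")) // true: default
	}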

Add support for code lenses in regtests, and use this to exercise the new
configuration. To achieve this, thread through a new 'EditorConfig' type
that configures the fake editor's LSP session. It made sense to move the
test Env overlay onto this config object as well.
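
The tests below construct editors with the zero EditorConfig; a sketch of
how a regtest might populate it instead (only fake.NewEditor and
fake.EditorConfig appear in the code below; the CodeLens field name is an
assumption for illustration):

	// Hypothetical: configure the fake editor's session before connecting.
	cfg := fake.EditorConfig{
		// CodeLens: map[string]bool{"test": false}, // assumed field name
	}
	editor, err := fake.NewEditor(sb, cfg).Connect(ctx, conn)
	if err != nil {
		t.Fatal(err)
	}
	defer editor.Shutdown(ctx)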

While working in this area, document some of the types in source.Options.

Change-Id: I961077422a273829c5cbd83c3b87fae29f77eeda
Reviewed-on: https://go-review.googlesource.com/c/tools/+/232680
Run-TryBot: Robert Findley <rfindley@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
2020-05-08 18:47:35 +00:00


// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lsprpc

import (
	"context"
	"regexp"
	"sync"
	"testing"
	"time"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/jsonrpc2"
	"golang.org/x/tools/internal/jsonrpc2/servertest"
	"golang.org/x/tools/internal/lsp/cache"
	"golang.org/x/tools/internal/lsp/debug"
	"golang.org/x/tools/internal/lsp/fake"
	"golang.org/x/tools/internal/lsp/protocol"
)
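
// fakeClient is a protocol.Client that buffers server log messages so that
// tests can assert on them.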
type fakeClient struct {
	protocol.Client

	logs chan string
}

func (c fakeClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
	c.logs <- params.Message
	return nil
}
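
// pingServer is a protocol.Server that logs a "ping" event on DidOpen,
// giving tests an observable server-side effect.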
type pingServer struct{ protocol.Server }

func (s pingServer) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
	event.Log(ctx, "ping")
	return nil
}

func (s pingServer) Shutdown(ctx context.Context) error {
	return nil
}
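
// TestClientLogging starts a stream server backed by the ping-logging
// server and verifies that the resulting log message reaches the client.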
func TestClientLogging(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	server := pingServer{}
	client := fakeClient{logs: make(chan string, 10)}

	ctx = debug.WithInstance(ctx, "", "")
	ss := NewStreamServer(cache.New(ctx, nil))
	ss.serverForTest = server
	ts := servertest.NewPipeServer(ctx, ss)
	defer checkClose(t, ts.Close)
	cc := ts.Connect(ctx)
	go cc.Run(ctx, protocol.ClientHandler(client, jsonrpc2.MethodNotFound))

	protocol.ServerDispatcher(cc).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{})

	select {
	case got := <-client.logs:
		want := "ping"
		matched, err := regexp.MatchString(want, got)
		if err != nil {
			t.Fatal(err)
		}
		if !matched {
			t.Errorf("got log %q, want a log containing %q", got, want)
		}
	case <-time.After(1 * time.Second):
		t.Error("timeout waiting for client log")
	}
}

// waitableServer instruments LSP requests so that we can control their
// timing. The requests chosen are arbitrary: we simply needed one that
// blocks, and another that doesn't.
type waitableServer struct {
	protocol.Server

	started chan struct{}
}

func (s waitableServer) Hover(ctx context.Context, _ *protocol.HoverParams) (*protocol.Hover, error) {
	s.started <- struct{}{}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-time.After(200 * time.Millisecond):
	}
	return &protocol.Hover{}, nil
}

func (s waitableServer) Resolve(_ context.Context, item *protocol.CompletionItem) (*protocol.CompletionItem, error) {
	return item, nil
}

func (s waitableServer) Shutdown(ctx context.Context) error {
	return nil
}

func checkClose(t *testing.T, closer func() error) {
	t.Helper()
	if err := closer(); err != nil {
		t.Errorf("closing: %v", err)
	}
}
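
// TestRequestCancellation verifies that cancelling an in-flight request
// aborts only that request, both over a direct connection and through a
// forwarder.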
func TestRequestCancellation(t *testing.T) {
	server := waitableServer{
		started: make(chan struct{}),
	}
	baseCtx := context.Background()
	serveCtx := debug.WithInstance(baseCtx, "", "")
	ss := NewStreamServer(cache.New(serveCtx, nil))
	ss.serverForTest = server
	tsDirect := servertest.NewTCPServer(serveCtx, ss)
	defer checkClose(t, tsDirect.Close)

	forwarderCtx := debug.WithInstance(baseCtx, "", "")
	forwarder := NewForwarder("tcp", tsDirect.Addr)
	tsForwarded := servertest.NewPipeServer(forwarderCtx, forwarder)
	defer checkClose(t, tsForwarded.Close)

	tests := []struct {
		serverType string
		ts         servertest.Connector
	}{
		{"direct", tsDirect},
		{"forwarder", tsForwarded},
	}

	for _, test := range tests {
		t.Run(test.serverType, func(t *testing.T) {
			cc := test.ts.Connect(baseCtx)
			sd := protocol.ServerDispatcher(cc)
			go cc.Run(baseCtx,
				protocol.Handlers(
					jsonrpc2.MethodNotFound))

			ctx := context.Background()
			ctx1, cancel1 := context.WithCancel(ctx)
			var (
				err1, err2 error
				wg         sync.WaitGroup
			)
			wg.Add(2)
			go func() {
				defer wg.Done()
				_, err1 = sd.Hover(ctx1, &protocol.HoverParams{})
			}()
			go func() {
				defer wg.Done()
				_, err2 = sd.Resolve(ctx, &protocol.CompletionItem{})
			}()

			// Wait for the Hover request to start.
			<-server.started
			cancel1()
			wg.Wait()
			if err1 == nil {
				t.Errorf("cancelled Hover(): got nil err")
			}
			if err2 != nil {
				t.Errorf("uncancelled Resolve(): err: %v", err2)
			}
			if _, err := sd.Resolve(ctx, &protocol.CompletionItem{}); err != nil {
				t.Errorf("subsequent Resolve(): %v", err)
			}
		})
	}
}

const exampleProgram = `
-- go.mod --
module mod

go 1.12
-- main.go --
package main

import "fmt"

func main() {
	fmt.Println("Hello World.")
}`
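
// TestDebugInfoLifecycle verifies that the debug instances track client and
// session lifecycles as editors connect to and disconnect from the server.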
func TestDebugInfoLifecycle(t *testing.T) {
	resetExitFuncs := OverrideExitFuncsForTest()
	defer resetExitFuncs()

	sb, err := fake.NewSandbox("gopls-lsprpc-test", exampleProgram, "", false)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := sb.Close(); err != nil {
			// TODO(golang/go#38490): we can't currently make this an error because
			// it fails on Windows: the workspace directory is still locked by a
			// separate Go process.
			// Once we have a reliable way to wait for proper shutdown, make this an
			// error.
			t.Logf("closing workspace failed: %v", err)
		}
	}()

	baseCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	clientCtx := debug.WithInstance(baseCtx, "", "")
	serverCtx := debug.WithInstance(baseCtx, "", "")

	cache := cache.New(serverCtx, nil)
	ss := NewStreamServer(cache)
	tsBackend := servertest.NewTCPServer(serverCtx, ss)

	forwarder := NewForwarder("tcp", tsBackend.Addr)
	tsForwarder := servertest.NewPipeServer(clientCtx, forwarder)

	conn1 := tsForwarder.Connect(clientCtx)
	ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, conn1)
	if err != nil {
		t.Fatal(err)
	}
	defer ed1.Shutdown(clientCtx)

	conn2 := tsBackend.Connect(baseCtx)
	ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, conn2)
	if err != nil {
		t.Fatal(err)
	}
	defer ed2.Shutdown(baseCtx)

	serverDebug := debug.GetInstance(serverCtx)
	if got, want := len(serverDebug.State.Clients()), 2; got != want {
		t.Errorf("len(server:Clients) = %d, want %d", got, want)
	}
	if got, want := len(serverDebug.State.Sessions()), 2; got != want {
		t.Errorf("len(server:Sessions) = %d, want %d", got, want)
	}
	clientDebug := debug.GetInstance(clientCtx)
	if got, want := len(clientDebug.State.Servers()), 1; got != want {
		t.Errorf("len(client:Servers) = %d, want %d", got, want)
	}

	// Close one of the connections to verify that the client and session were
	// dropped.
	if err := ed1.Shutdown(clientCtx); err != nil {
		t.Fatal(err)
	}
	if got, want := len(serverDebug.State.Sessions()), 1; got != want {
		t.Errorf("len(server:Sessions()) = %d, want %d", got, want)
	}
	// TODO(rfindley): once disconnection works, assert that len(Clients) == 1
	// (as of writing, it is still 2)
}

// TODO: add a test for telemetry.