go/internal/lsp/lsprpc/lsprpc_test.go
commit 405595e0b5 by Rob Findley

internal/lsp/fake: be more careful when closing the workspace
Closing the workspace has frequently been failing on Windows, due to
file locks held by the go command.

This change makes several tests more careful to check errors when
closing resources, and defers closing the regtest workspaces until the
entire test suite completes, at which point it is much more likely that
closing the workspace will succeed.

If this change results in test flakes on Windows, we should temporarily
demote errors in regtest.Runner.Close to a t.Log.

Updates golang/go#38490

Change-Id: Ibd2f7dd0e0e2faecfa0ca8c60237fc72e64f6719
Reviewed-on: https://go-review.googlesource.com/c/tools/+/228231
Run-TryBot: Robert Findley <rfindley@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
2020-04-16 19:25:41 +00:00
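
The fix follows a simple pattern: register each workspace's Close with a suite-level list, and drain that list only after all tests have run. Below is a minimal sketch of that pattern, assuming a hypothetical deferToSuiteExit helper and closers slice; the actual cleanup lives in regtest.Runner, not in this file.

package regtest // sketch only; not the actual regtest package contents

import (
	"fmt"
	"os"
	"sync"
	"testing"
)

var (
	closersMu sync.Mutex
	closers   []func() error // deferred cleanup, e.g. fake.Workspace Close functions
)

// deferToSuiteExit registers cleanup to run only after every test has
// finished, by which point the go command is far more likely to have
// released its file locks on Windows.
func deferToSuiteExit(f func() error) {
	closersMu.Lock()
	defer closersMu.Unlock()
	closers = append(closers, f)
}

func TestMain(m *testing.M) {
	code := m.Run()
	// All tests are done, so no further registrations can race with us.
	for _, closer := range closers {
		if err := closer(); err != nil {
			// If this proves flaky on Windows, demote it to a plain log
			// rather than failing the suite.
			fmt.Fprintf(os.Stderr, "closing workspace: %v\n", err)
			code = 1
		}
	}
	os.Exit(code)
}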


// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lsprpc

import (
	"context"
	"regexp"
	"sync"
	"testing"
	"time"

	"golang.org/x/tools/internal/jsonrpc2"
	"golang.org/x/tools/internal/jsonrpc2/servertest"
	"golang.org/x/tools/internal/lsp/cache"
	"golang.org/x/tools/internal/lsp/debug"
	"golang.org/x/tools/internal/lsp/fake"
	"golang.org/x/tools/internal/lsp/protocol"
	"golang.org/x/tools/internal/telemetry/event"
)
// fakeClient is a partial protocol.Client that records LogMessage
// notifications, so tests can assert on server-side logging.
type fakeClient struct {
	protocol.Client

	logs chan string
}

func (c fakeClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
	c.logs <- params.Message
	return nil
}

// pingServer is a partial protocol.Server that logs "ping" on DidOpen, giving
// TestClientLogging an observable event to wait for.
type pingServer struct{ protocol.Server }

func (s pingServer) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
	event.Print(ctx, "ping")
	return nil
}

func (s pingServer) Shutdown(ctx context.Context) error {
	return nil
}
func TestClientLogging(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	server := pingServer{}
	client := fakeClient{logs: make(chan string, 10)}

	ctx = debug.WithInstance(ctx, "", "")
	ss := NewStreamServer(cache.New(ctx, nil))
	ss.serverForTest = server
	ts := servertest.NewPipeServer(ctx, ss)
	defer checkClose(t, ts.Close)
	cc := ts.Connect(ctx)
	go cc.Run(ctx, protocol.ClientHandler(client, jsonrpc2.MethodNotFound))

	protocol.ServerDispatcher(cc).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{})

	select {
	case got := <-client.logs:
		want := "ping"
		matched, err := regexp.MatchString(want, got)
		if err != nil {
			t.Fatal(err)
		}
		if !matched {
			t.Errorf("got log %q, want a log containing %q", got, want)
		}
	case <-time.After(1 * time.Second):
		t.Error("timeout waiting for client log")
	}
}
// waitableServer instruments LSP requests so that we can control their
// timing. The requests chosen are arbitrary: we simply needed one that
// blocks, and another that doesn't.
type waitableServer struct {
	protocol.Server

	started chan struct{}
}

func (s waitableServer) Hover(ctx context.Context, _ *protocol.HoverParams) (*protocol.Hover, error) {
	s.started <- struct{}{}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-time.After(200 * time.Millisecond):
	}
	return &protocol.Hover{}, nil
}

func (s waitableServer) Resolve(_ context.Context, item *protocol.CompletionItem) (*protocol.CompletionItem, error) {
	return item, nil
}

func (s waitableServer) Shutdown(ctx context.Context) error {
	return nil
}

// checkClose runs the given closer and reports any error to the test, so
// that close failures (e.g. Windows file locks) surface rather than being
// silently ignored.
func checkClose(t *testing.T, closer func() error) {
	t.Helper()
	if err := closer(); err != nil {
		t.Errorf("closing: %v", err)
	}
}
func TestRequestCancellation(t *testing.T) {
	server := waitableServer{
		started: make(chan struct{}),
	}
	baseCtx := context.Background()
	serveCtx := debug.WithInstance(baseCtx, "", "")
	ss := NewStreamServer(cache.New(serveCtx, nil))
	ss.serverForTest = server
	tsDirect := servertest.NewTCPServer(serveCtx, ss)
	defer checkClose(t, tsDirect.Close)

	forwarderCtx := debug.WithInstance(baseCtx, "", "")
	forwarder := NewForwarder("tcp", tsDirect.Addr)
	tsForwarded := servertest.NewPipeServer(forwarderCtx, forwarder)
	defer checkClose(t, tsForwarded.Close)

	tests := []struct {
		serverType string
		ts         servertest.Connector
	}{
		{"direct", tsDirect},
		{"forwarder", tsForwarded},
	}

	for _, test := range tests {
		t.Run(test.serverType, func(t *testing.T) {
			cc := test.ts.Connect(baseCtx)
			sd := protocol.ServerDispatcher(cc)
			go cc.Run(baseCtx,
				protocol.Handlers(
					jsonrpc2.MethodNotFound))
			ctx := context.Background()
			ctx1, cancel1 := context.WithCancel(ctx)
			var (
				err1, err2 error
				wg         sync.WaitGroup
			)
			// Issue two concurrent requests: a Hover that blocks until
			// cancelled, and a Resolve that returns immediately.
			wg.Add(2)
			go func() {
				defer wg.Done()
				_, err1 = sd.Hover(ctx1, &protocol.HoverParams{})
			}()
			go func() {
				defer wg.Done()
				_, err2 = sd.Resolve(ctx, &protocol.CompletionItem{})
			}()
			// Wait for the Hover request to start.
			<-server.started
			cancel1()
			wg.Wait()
			if err1 == nil {
				t.Errorf("cancelled Hover(): got nil err")
			}
			if err2 != nil {
				t.Errorf("uncancelled Resolve(): err: %v", err2)
			}
			// The connection should remain usable after a cancellation.
			if _, err := sd.Resolve(ctx, &protocol.CompletionItem{}); err != nil {
				t.Errorf("subsequent Resolve(): %v", err)
			}
		})
	}
}
const exampleProgram = `
-- go.mod --
module mod

go 1.12
-- main.go --
package main

import "fmt"

func main() {
	fmt.Println("Hello World.")
}`
func TestDebugInfoLifecycle(t *testing.T) {
	resetExitFuncs := OverrideExitFuncsForTest()
	defer resetExitFuncs()

	ws, err := fake.NewWorkspace("gopls-lsprpc-test", []byte(exampleProgram))
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := ws.Close(); err != nil {
			// TODO(golang/go#38490): we can't currently make this an error because
			// it fails on Windows: the workspace directory is still locked by a
			// separate Go process.
			// Once we have a reliable way to wait for proper shutdown, make this an
			// error.
			t.Logf("closing workspace failed: %v", err)
		}
	}()

	baseCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	clientCtx := debug.WithInstance(baseCtx, "", "")
	serverCtx := debug.WithInstance(baseCtx, "", "")

	cache := cache.New(serverCtx, nil)
	ss := NewStreamServer(cache)
	tsBackend := servertest.NewTCPServer(serverCtx, ss)

	forwarder := NewForwarder("tcp", tsBackend.Addr)
	tsForwarder := servertest.NewPipeServer(clientCtx, forwarder)

	conn1 := tsForwarder.Connect(clientCtx)
	ed1, err := fake.NewConnectedEditor(clientCtx, ws, conn1)
	if err != nil {
		t.Fatal(err)
	}
	defer ed1.Shutdown(clientCtx)

	conn2 := tsBackend.Connect(baseCtx)
	ed2, err := fake.NewConnectedEditor(baseCtx, ws, conn2)
	if err != nil {
		t.Fatal(err)
	}
	defer ed2.Shutdown(baseCtx)

	serverDebug := debug.GetInstance(serverCtx)
	if got, want := len(serverDebug.State.Clients()), 2; got != want {
		t.Errorf("len(server:Clients) = %d, want %d", got, want)
	}
	if got, want := len(serverDebug.State.Sessions()), 2; got != want {
		t.Errorf("len(server:Sessions) = %d, want %d", got, want)
	}
	clientDebug := debug.GetInstance(clientCtx)
	if got, want := len(clientDebug.State.Servers()), 1; got != want {
		t.Errorf("len(client:Servers) = %d, want %d", got, want)
	}

	// Close one of the connections to verify that the client and session were
	// dropped.
	if err := ed1.Shutdown(clientCtx); err != nil {
		t.Fatal(err)
	}
	if got, want := len(serverDebug.State.Sessions()), 1; got != want {
		t.Errorf("len(server:Sessions()) = %d, want %d", got, want)
	}
	// TODO(rfindley): once disconnection works, assert that len(Clients) == 1
	// (as of writing, it is still 2)
}

// TODO: add a test for telemetry.