Exit now closes the connection rather than exiting the process. This allows things to shut down gracefully, and removes special cases. It also allows the tests to call CloseEditor instead of just Shutdown, which prevents goroutine leaks.

Change-Id: I26121ba5d393ef74ce0e912611c8b3817e3691ea
Reviewed-on: https://go-review.googlesource.com/c/tools/+/231798
Reviewed-by: Robert Findley <rfindley@google.com>
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lsprpc

import (
	"context"
	"regexp"
	"sync"
	"testing"
	"time"

	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/jsonrpc2"
	"golang.org/x/tools/internal/jsonrpc2/servertest"
	"golang.org/x/tools/internal/lsp/cache"
	"golang.org/x/tools/internal/lsp/debug"
	"golang.org/x/tools/internal/lsp/fake"
	"golang.org/x/tools/internal/lsp/protocol"
)

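// fakeClient is a protocol.Client stub that records LogMessage notifications
// on a channel, so tests can observe messages logged by the server.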
type fakeClient struct {
	protocol.Client

	logs chan string
}

func (c fakeClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
	c.logs <- params.Message
	return nil
}

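// pingServer is a protocol.Server stub whose DidOpen handler emits a "ping"
// event, giving TestClientLogging something observable to wait for.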
type pingServer struct{ protocol.Server }

func (s pingServer) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
	event.Log(ctx, "ping")
	return nil
}

func (s pingServer) Shutdown(ctx context.Context) error {
	return nil
}

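// TestClientLogging verifies that an event logged on the server side is
// delivered to the connected client as a log message.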
func TestClientLogging(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	server := pingServer{}
	client := fakeClient{logs: make(chan string, 10)}

	ctx = debug.WithInstance(ctx, "", "")
	ss := NewStreamServer(cache.New(ctx, nil))
	ss.serverForTest = server
	ts := servertest.NewPipeServer(ctx, ss, nil)
	defer checkClose(t, ts.Close)
	cc := ts.Connect(ctx)
	cc.Go(ctx, protocol.ClientHandler(client, jsonrpc2.MethodNotFound))

	protocol.ServerDispatcher(cc).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{})

	select {
	case got := <-client.logs:
		want := "ping"
		matched, err := regexp.MatchString(want, got)
		if err != nil {
			t.Fatal(err)
		}
		if !matched {
			t.Errorf("got log %q, want a log containing %q", got, want)
		}
	case <-time.After(1 * time.Second):
		t.Error("timeout waiting for client log")
	}
}

// waitableServer instruments LSP requests so that we can control their timing.
// The requests chosen are arbitrary: we simply needed one that blocks, and
// another that doesn't.
type waitableServer struct {
	protocol.Server

	started chan struct{}
}

func (s waitableServer) Hover(ctx context.Context, _ *protocol.HoverParams) (*protocol.Hover, error) {
	s.started <- struct{}{}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-time.After(200 * time.Millisecond):
	}
	return &protocol.Hover{}, nil
}

func (s waitableServer) Resolve(_ context.Context, item *protocol.CompletionItem) (*protocol.CompletionItem, error) {
	return item, nil
}

func (s waitableServer) Shutdown(ctx context.Context) error {
	return nil
}

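// checkClose calls closer and reports any error as a test failure.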
func checkClose(t *testing.T, closer func() error) {
	t.Helper()
	if err := closer(); err != nil {
		t.Errorf("closing: %v", err)
	}
}

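// TestRequestCancellation verifies that cancelling one in-flight request does
// not affect other requests on the same connection, both when connecting to
// the server directly and when connecting through a forwarder.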
func TestRequestCancellation(t *testing.T) {
	server := waitableServer{
		started: make(chan struct{}),
	}
	baseCtx := context.Background()
	serveCtx := debug.WithInstance(baseCtx, "", "")
	ss := NewStreamServer(cache.New(serveCtx, nil))
	ss.serverForTest = server
	tsDirect := servertest.NewTCPServer(serveCtx, ss, nil)
	defer checkClose(t, tsDirect.Close)

	forwarderCtx := debug.WithInstance(baseCtx, "", "")
	forwarder := NewForwarder("tcp", tsDirect.Addr)
	tsForwarded := servertest.NewPipeServer(forwarderCtx, forwarder, nil)
	defer checkClose(t, tsForwarded.Close)

	tests := []struct {
		serverType string
		ts         servertest.Connector
	}{
		{"direct", tsDirect},
		{"forwarder", tsForwarded},
	}

	for _, test := range tests {
		t.Run(test.serverType, func(t *testing.T) {
			cc := test.ts.Connect(baseCtx)
			sd := protocol.ServerDispatcher(cc)
			cc.Go(baseCtx,
				protocol.Handlers(
					jsonrpc2.MethodNotFound))

			ctx := context.Background()
			ctx1, cancel1 := context.WithCancel(ctx)
			var (
				err1, err2 error
				wg         sync.WaitGroup
			)
			wg.Add(2)
			go func() {
				defer wg.Done()
				_, err1 = sd.Hover(ctx1, &protocol.HoverParams{})
			}()
			go func() {
				defer wg.Done()
				_, err2 = sd.Resolve(ctx, &protocol.CompletionItem{})
			}()
			// Wait for the Hover request to start.
			<-server.started
			cancel1()
			wg.Wait()
			if err1 == nil {
				t.Errorf("cancelled Hover(): got nil err")
			}
			if err2 != nil {
				t.Errorf("uncancelled Resolve(): err: %v", err2)
			}
			if _, err := sd.Resolve(ctx, &protocol.CompletionItem{}); err != nil {
				t.Errorf("subsequent Resolve(): %v", err)
			}
		})
	}
}

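// exampleProgram is a minimal Go module used to populate the fake sandbox in
// TestDebugInfoLifecycle.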
const exampleProgram = `
-- go.mod --
module mod

go 1.12
-- main.go --
package main

import "fmt"

func main() {
	fmt.Println("Hello World.")
}`

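// TestDebugInfoLifecycle connects two editors (one through a forwarder, one
// directly) and checks the client, session, and server counts reported by the
// debug instances, before and after shutting down one of the editors.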
func TestDebugInfoLifecycle(t *testing.T) {
	sb, err := fake.NewSandbox("gopls-lsprpc-test", exampleProgram, "", false)
	if err != nil {
		t.Fatal(err)
	}
	defer func() {
		if err := sb.Close(); err != nil {
			// TODO(golang/go#38490): we can't currently make this an error because
			// it fails on Windows: the workspace directory is still locked by a
			// separate Go process.
			// Once we have a reliable way to wait for proper shutdown, make this an
			// error.
			t.Logf("closing workspace failed: %v", err)
		}
	}()

	baseCtx, cancel := context.WithCancel(context.Background())
	defer cancel()
	clientCtx := debug.WithInstance(baseCtx, "", "")
	serverCtx := debug.WithInstance(baseCtx, "", "")

	cache := cache.New(serverCtx, nil)
	ss := NewStreamServer(cache)
	tsBackend := servertest.NewTCPServer(serverCtx, ss, nil)

	forwarder := NewForwarder("tcp", tsBackend.Addr)
	tsForwarder := servertest.NewPipeServer(clientCtx, forwarder, nil)

	conn1 := tsForwarder.Connect(clientCtx)
	ed1, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(clientCtx, conn1, fake.ClientHooks{})
	if err != nil {
		t.Fatal(err)
	}
	defer ed1.Shutdown(clientCtx)
	conn2 := tsBackend.Connect(baseCtx)
	ed2, err := fake.NewEditor(sb, fake.EditorConfig{}).Connect(baseCtx, conn2, fake.ClientHooks{})
	if err != nil {
		t.Fatal(err)
	}
	defer ed2.Shutdown(baseCtx)

	serverDebug := debug.GetInstance(serverCtx)
	if got, want := len(serverDebug.State.Clients()), 2; got != want {
		t.Errorf("len(server:Clients) = %d, want %d", got, want)
	}
	if got, want := len(serverDebug.State.Sessions()), 2; got != want {
		t.Errorf("len(server:Sessions) = %d, want %d", got, want)
	}
	clientDebug := debug.GetInstance(clientCtx)
	if got, want := len(clientDebug.State.Servers()), 1; got != want {
		t.Errorf("len(client:Servers) = %d, want %d", got, want)
	}
	// Close one of the connections to verify that the client and session were
	// dropped.
	if err := ed1.Shutdown(clientCtx); err != nil {
		t.Fatal(err)
	}
	if got, want := len(serverDebug.State.Sessions()), 1; got != want {
		t.Errorf("len(server:Sessions()) = %d, want %d", got, want)
	}
	// TODO(rfindley): once disconnection works, assert that len(Clients) == 1
	// (as of writing, it is still 2)
}