go/internal/lsp/lsprpc/lsprpc_test.go
Rob Findley 88be01311a internal/jsonrpc2: fix races in cancellation
We had a deadlock in cases where a request was cancelled (1) after being
written to the stream, but (2) before a response was received. This
resulted in the request ID being removed from the pending map while the
server still had the request, at which point the server's response would
hang in Conn.Run trying to send on a nil channel.
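
For illustration only, a standalone snippet of the underlying Go
semantics (not the jsonrpc2 code): once the pending entry has been
deleted, the map lookup yields a nil channel, and a send on a nil
channel blocks forever.

package main

// Standalone illustration of the hazard (not the jsonrpc2 code): once the
// pending entry is gone, the map lookup returns a nil channel, and the send
// blocks forever. Run on its own, the runtime reports the deadlock.
func main() {
	pending := map[string]chan string{} // the entry for "42" was already removed
	rchan := pending["42"]              // zero value: a nil channel
	rchan <- "response"                 // blocks forever
}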

After fixing this nil send there was still a race: Conn.Run could
retrieve the pending request, and Conn.Call could select ctx.Done before
Conn.Run sent to the response channel, again resulting in a blocking
send. Fix this by adding a buffer to the response channel.
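
As a rough sketch of how that buffer removes the race (the names conn,
call, respond, and pending below are illustrative, not the actual
jsonrpc2 identifiers), the responding side can always complete its send,
even when the caller has already returned because its context was
cancelled; the sketch also follows the bookkeeping rules described in
the next paragraph:

package main

// A minimal sketch of the pattern, with illustrative names only (conn,
// call, respond, and pending are not the real jsonrpc2 identifiers). The
// one-element buffer lets respond complete its send even after call has
// returned because its context was cancelled.

import (
	"context"
	"fmt"
	"sync"
	"time"
)

type response struct{ result string }

type conn struct {
	mu      sync.Mutex
	pending map[int]chan *response
}

// call registers a buffered response channel and waits for a response or
// cancellation. Its deferred cleanup is the only place the pending entry
// is removed.
func (c *conn) call(ctx context.Context, id int) (*response, error) {
	rchan := make(chan *response, 1) // buffer of 1: respond never blocks
	c.mu.Lock()
	c.pending[id] = rchan
	c.mu.Unlock()
	defer func() {
		c.mu.Lock()
		delete(c.pending, id)
		c.mu.Unlock()
	}()
	select {
	case resp := <-rchan:
		return resp, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

// respond delivers a result. The send lands in the channel's buffer, so it
// cannot hang even if call has already returned, and the channel is never
// closed: there is exactly one potential recipient.
func (c *conn) respond(id int, result string) {
	c.mu.Lock()
	rchan := c.pending[id]
	c.mu.Unlock()
	if rchan != nil {
		rchan <- &response{result}
	}
}

func main() {
	c := &conn{pending: make(map[int]chan *response)}
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel()                   // cancel before any response arrives
		c.respond(1, "late reply") // must not hang
	}()
	if _, err := c.call(ctx, 1); err != nil {
		fmt.Println("call returned:", err) // typically "context canceled"
	}
}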

The response channel management is also made less forgiving, because we
should be able to reason precisely about how many sends and receives
will occur:
 + Don't close the response channel after sending a response: there
   should only be one recipient.
 + Don't delete the ID from the pending map twice: it should only be
   cleaned up by Conn.Call.
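
In the sketch above, those rules correspond to respond never closing
rchan (with exactly one potential recipient, a close would only permit a
spurious zero-value receive) and to the pending entry being deleted only
in call's deferred cleanup, so the number of sends and receives per
request can be accounted for exactly.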

Cancellation tests in the lsprpc package are updated to exercise the
race conditions.

Fixes golang/go#37159

Change-Id: Ie3207442ea910f79247b18d8647fd52f39fb15db
Reviewed-on: https://go-review.googlesource.com/c/tools/+/219126
Run-TryBot: Robert Findley <rfindley@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Heschi Kreinick <heschi@google.com>
2020-02-14 14:43:24 +00:00

// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package lsprpc

import (
	"context"
	"regexp"
	"sync"
	"testing"
	"time"

	"golang.org/x/tools/internal/jsonrpc2/servertest"
	"golang.org/x/tools/internal/lsp/protocol"
	"golang.org/x/tools/internal/telemetry/log"
)

type fakeClient struct {
	protocol.Client
	logs chan string
}

func (c fakeClient) LogMessage(ctx context.Context, params *protocol.LogMessageParams) error {
	c.logs <- params.Message
	return nil
}

type pingServer struct{ protocol.Server }

func (s pingServer) DidOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error {
	log.Print(ctx, "ping")
	return nil
}

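// TestClientLogging verifies that a log message produced on the server
// (the "ping" logged by pingServer.DidOpen) is forwarded to the connected
// client's LogMessage handler.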
func TestClientLogging(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	server := pingServer{}
	client := fakeClient{logs: make(chan string, 10)}

	ss := &StreamServer{
		accept: func(c protocol.Client) protocol.Server {
			return server
		},
	}
	ts := servertest.NewServer(ctx, ss)
	cc := ts.Connect(ctx)
	cc.AddHandler(protocol.ClientHandler(client))

	protocol.ServerDispatcher(cc).DidOpen(ctx, &protocol.DidOpenTextDocumentParams{})

	select {
	case got := <-client.logs:
		want := "ping"
		matched, err := regexp.MatchString(want, got)
		if err != nil {
			t.Fatal(err)
		}
		if !matched {
			t.Errorf("got log %q, want a log containing %q", got, want)
		}
	case <-time.After(1 * time.Second):
		t.Error("timeout waiting for client log")
	}
}
// waitableServer instruments LSP requests so that we can control their
// timing. The requests chosen are arbitrary: we simply needed one that
// blocks, and another that doesn't.
type waitableServer struct {
	protocol.Server
	started chan struct{}
}

func (s waitableServer) Hover(ctx context.Context, _ *protocol.HoverParams) (*protocol.Hover, error) {
	s.started <- struct{}{}
	select {
	case <-ctx.Done():
		return nil, ctx.Err()
	case <-time.After(200 * time.Millisecond):
	}
	return &protocol.Hover{}, nil
}

func (s waitableServer) Resolve(_ context.Context, item *protocol.CompletionItem) (*protocol.CompletionItem, error) {
	return item, nil
}

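// TestRequestCancellation verifies that cancelling an in-flight request
// fails only that request and leaves the connection usable for concurrent
// and subsequent requests, over both a direct and a forwarded connection.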
func TestRequestCancellation(t *testing.T) {
	server := waitableServer{
		started: make(chan struct{}),
	}
	ss := &StreamServer{
		accept: func(c protocol.Client) protocol.Server {
			return server
		},
	}
	ctx := context.Background()
	tsDirect := servertest.NewServer(ctx, ss)

	forwarder := NewForwarder(tsDirect.Addr, false)
	tsForwarded := servertest.NewServer(ctx, forwarder)

	tests := []struct {
		serverType string
		ts         *servertest.Server
	}{
		{"direct", tsDirect},
		{"forwarder", tsForwarded},
	}

	for _, test := range tests {
		t.Run(test.serverType, func(t *testing.T) {
			cc := test.ts.Connect(ctx)
			cc.AddHandler(protocol.Canceller{})
			ctx := context.Background()
			ctx1, cancel1 := context.WithCancel(ctx)
			var (
				err1, err2 error
				wg         sync.WaitGroup
			)
			wg.Add(2)
			go func() {
				defer wg.Done()
				_, err1 = protocol.ServerDispatcher(cc).Hover(ctx1, &protocol.HoverParams{})
			}()
			go func() {
				defer wg.Done()
				_, err2 = protocol.ServerDispatcher(cc).Resolve(ctx, &protocol.CompletionItem{})
			}()
			// Wait for the Hover request to start.
			<-server.started
			cancel1()
			wg.Wait()
			if err1 == nil {
				t.Errorf("cancelled Hover(): got nil err")
			}
			if err2 != nil {
				t.Errorf("uncancelled Resolve(): err: %v", err2)
			}
			if _, err := protocol.ServerDispatcher(cc).Resolve(ctx, &protocol.CompletionItem{}); err != nil {
				t.Errorf("subsequent Resolve(): %v", err)
			}
		})
	}
}
// TODO: add a test for telemetry.