mirror of
https://github.com/golang/go
synced 2024-11-18 23:54:41 -07:00
f038785680
Due to the asynchronous and non-transactional nature of the LSP, writing regtests requires waiting for certain conditions to be met in the client editor. To this point, the only type of condition for which we supported waiting was the presence of diagnostic messages, but we can in principle wait for anything that is triggered by a server notification. This change generalizes the notion of expectations to also encompass log messages. Doing this required expanding the value returned from checking expectations to include a new "Unmeetable" verdict, to account for cases where we know that a condition will never be met (for example if it is a negative assertion). This may be useful for diagnostics as well. A test is added to demonstrate these new expectations, where the initial workspace load fails due to unfetchable dependencies. Additionally, some helper flags are added to help debug regtests without a code change, by allowing changing the test timeout, printing logs, and printing goroutine profiles. Updates golang/go#36879 Change-Id: I4cc194e10a4f181ad36a1a7abbb08ff41954b642 Reviewed-on: https://go-review.googlesource.com/c/tools/+/228399 Run-TryBot: Robert Findley <rfindley@google.com> TryBot-Result: Gobot Gobot <gobot@golang.org> Reviewed-by: Rebecca Stambler <rstambler@golang.org>
682 lines
19 KiB
Go
682 lines
19 KiB
Go
// Copyright 2020 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
// Package regtest provides an environment for writing regression tests.
|
|
package regtest
|
|
|
|
import (
|
|
"bytes"
|
|
"context"
|
|
"fmt"
|
|
"io"
|
|
"io/ioutil"
|
|
"os"
|
|
"os/exec"
|
|
"path/filepath"
|
|
"regexp"
|
|
"runtime/pprof"
|
|
"strings"
|
|
"sync"
|
|
"testing"
|
|
"time"
|
|
|
|
"golang.org/x/tools/internal/jsonrpc2"
|
|
"golang.org/x/tools/internal/jsonrpc2/servertest"
|
|
"golang.org/x/tools/internal/lsp/cache"
|
|
"golang.org/x/tools/internal/lsp/debug"
|
|
"golang.org/x/tools/internal/lsp/fake"
|
|
"golang.org/x/tools/internal/lsp/lsprpc"
|
|
"golang.org/x/tools/internal/lsp/protocol"
|
|
)
|
|
|
|
// EnvMode is a bitmask that defines in which execution environments a test
// should run. Modes may be combined with bitwise OR (see NormalModes).
type EnvMode int
|
|
|
|
const (
	// Singleton mode uses a separate cache for each test.
	Singleton EnvMode = 1 << iota

	// Forwarded forwards connections to an in-process gopls instance.
	Forwarded
	// SeparateProcess runs a separate gopls process, and forwards connections to
	// it. Requires Runner.GoplsPath to be configured.
	SeparateProcess
	// NormalModes are the modes run by default: Singleton and Forwarded.
	// SeparateProcess is excluded because it needs an external gopls binary.
	NormalModes = Singleton | Forwarded
)
|
|
|
|
// A Runner runs tests in gopls execution environments, as specified by its
// modes. For modes that share state (for example, a shared cache or common
// remote), any tests that execute on the same Runner will share the same
// state.
type Runner struct {
	// DefaultModes is the bitmask of modes used when a test does not override
	// them with WithModes.
	DefaultModes EnvMode
	// Timeout bounds each test run unless overridden with WithTimeout.
	Timeout time.Duration
	// GoplsPath is the path to a gopls binary; required for SeparateProcess mode.
	GoplsPath string
	// AlwaysPrintLogs prints server logs even for passing tests.
	AlwaysPrintLogs bool
	// PrintGoroutinesOnFailure dumps a goroutine profile when a test fails.
	PrintGoroutinesOnFailure bool

	// mu guards the fields below.
	mu sync.Mutex
	// ts is the lazily created shared server used by Forwarded mode.
	ts *servertest.TCPServer
	// socketDir holds the unix socket of the daemon used by SeparateProcess mode.
	socketDir string
	// closers is a queue of clean-up functions to run at the end of the entire
	// test suite.
	closers []io.Closer
}
|
|
|
|
// Modes returns the bitmask of environment modes this runner is configured to
// test (see the EnvMode constants).
func (r *Runner) Modes() EnvMode {
	return r.DefaultModes
}
|
|
|
|
// getTestServer gets the test server instance to connect to, or creates one if
|
|
// it doesn't exist.
|
|
func (r *Runner) getTestServer() *servertest.TCPServer {
|
|
r.mu.Lock()
|
|
defer r.mu.Unlock()
|
|
if r.ts == nil {
|
|
ctx := context.Background()
|
|
ctx = debug.WithInstance(ctx, "", "")
|
|
ss := lsprpc.NewStreamServer(cache.New(ctx, nil))
|
|
r.ts = servertest.NewTCPServer(context.Background(), ss)
|
|
}
|
|
return r.ts
|
|
}
|
|
|
|
// runTestAsGoplsEnvvar triggers TestMain to run gopls instead of running
// tests. It's a trick to allow tests to find a binary to use to start a gopls
// subprocess: the test binary itself is re-executed as gopls (see
// getRemoteSocket, which sets this variable).
const runTestAsGoplsEnvvar = "_GOPLS_TEST_BINARY_RUN_AS_GOPLS"
|
|
|
|
// getRemoteSocket returns the path of a unix domain socket on which a shared
// external gopls daemon is serving, starting the daemon on first use. It is
// used by SeparateProcess mode and requires GoplsPath to be configured.
func (r *Runner) getRemoteSocket(t *testing.T) string {
	t.Helper()
	r.mu.Lock()
	defer r.mu.Unlock()
	const daemonFile = "gopls-test-daemon"
	if r.socketDir != "" {
		// The daemon was already started by an earlier test; reuse it.
		return filepath.Join(r.socketDir, daemonFile)
	}

	if r.GoplsPath == "" {
		t.Fatal("cannot run tests with a separate process unless a path to a gopls binary is configured")
	}
	var err error
	r.socketDir, err = ioutil.TempDir("", "gopls-regtests")
	if err != nil {
		t.Fatalf("creating tempdir: %v", err)
	}
	socket := filepath.Join(r.socketDir, daemonFile)
	// "unix;<path>" is the gopls -listen syntax for a unix domain socket.
	// NOTE(review): -listen.timeout presumably shuts the daemon down after
	// inactivity — confirm against gopls serve flags.
	args := []string{"serve", "-listen", "unix;" + socket, "-listen.timeout", "10s"}
	cmd := exec.Command(r.GoplsPath, args...)
	// The env var tells the re-executed test binary (see runTestAsGoplsEnvvar)
	// to behave as gopls rather than running tests.
	cmd.Env = append(os.Environ(), runTestAsGoplsEnvvar+"=true")
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	go func() {
		// The daemon is expected to outlive the tests that use it; an exit
		// with an error is fatal to the whole run.
		if err := cmd.Run(); err != nil {
			panic(fmt.Sprintf("error running external gopls: %v\nstderr:\n%s", err, stderr.String()))
		}
	}()
	return socket
}
|
|
|
|
// AddCloser schedules a closer to be closed at the end of the entire test
// run (not at the end of an individual test). This is useful for Windows in
// particular, as closing workspaces too early can fail while the server
// still holds file locks; see the comment in Run.
func (r *Runner) AddCloser(closer io.Closer) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.closers = append(r.closers, closer)
}
|
|
|
|
// Close cleans up resource that have been allocated to this workspace.
|
|
func (r *Runner) Close() error {
|
|
r.mu.Lock()
|
|
defer r.mu.Unlock()
|
|
|
|
var errmsgs []string
|
|
if r.ts != nil {
|
|
if err := r.ts.Close(); err != nil {
|
|
errmsgs = append(errmsgs, err.Error())
|
|
}
|
|
}
|
|
if r.socketDir != "" {
|
|
if err := os.RemoveAll(r.socketDir); err != nil {
|
|
errmsgs = append(errmsgs, err.Error())
|
|
}
|
|
}
|
|
for _, closer := range r.closers {
|
|
if err := closer.Close(); err != nil {
|
|
errmsgs = append(errmsgs, err.Error())
|
|
}
|
|
}
|
|
if len(errmsgs) > 0 {
|
|
return fmt.Errorf("errors closing the test runner:\n\t%s", strings.Join(errmsgs, "\n\t"))
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// runConfig holds the per-run settings assembled from a Runner's defaults
// and any RunOptions passed to Run.
type runConfig struct {
	// modes is the bitmask of execution modes to run the test in.
	modes EnvMode
	// proxyTxt is an optional txtar-encoded module proxy (see WithProxy).
	proxyTxt string
	// timeout bounds the whole test run.
	timeout time.Duration
}
|
|
|
|
func (r *Runner) defaultConfig() *runConfig {
|
|
return &runConfig{
|
|
modes: r.DefaultModes,
|
|
timeout: r.Timeout,
|
|
}
|
|
}
|
|
|
|
// A RunOption augments the behavior of the test runner.
type RunOption interface {
	// set applies the option to the given run configuration.
	set(*runConfig)
}

// optionSetter adapts a plain function to the RunOption interface.
type optionSetter func(*runConfig)

// set implements RunOption by invoking the wrapped function.
func (f optionSetter) set(opts *runConfig) {
	f(opts)
}
|
|
|
|
// WithTimeout configures a custom timeout for this test run.
|
|
func WithTimeout(d time.Duration) RunOption {
|
|
return optionSetter(func(opts *runConfig) {
|
|
opts.timeout = d
|
|
})
|
|
}
|
|
|
|
// WithProxy configures a file proxy using the given txtar-encoded string.
|
|
func WithProxy(txt string) RunOption {
|
|
return optionSetter(func(opts *runConfig) {
|
|
opts.proxyTxt = txt
|
|
})
|
|
}
|
|
|
|
// WithModes configures the execution modes that the test should run in.
|
|
func WithModes(modes EnvMode) RunOption {
|
|
return optionSetter(func(opts *runConfig) {
|
|
opts.modes = modes
|
|
})
|
|
}
|
|
|
|
// Run executes the test function in the default configured gopls execution
// modes. For each a test run, a new workspace is created containing the
// un-txtared files specified by filedata. Each enabled mode runs as its own
// subtest with a fresh workspace, editor session, and (logged) server stream.
func (r *Runner) Run(t *testing.T, filedata string, test func(t *testing.T, e *Env), opts ...RunOption) {
	t.Helper()
	config := r.defaultConfig()
	for _, opt := range opts {
		opt.set(config)
	}

	// One entry per supported mode; entries whose mode bit is not set in the
	// config are skipped below.
	tests := []struct {
		name      string
		mode      EnvMode
		getServer func(context.Context, *testing.T) jsonrpc2.StreamServer
	}{
		{"singleton", Singleton, singletonEnv},
		{"forwarded", Forwarded, r.forwardedEnv},
		{"separate_process", SeparateProcess, r.separateProcessEnv},
	}

	for _, tc := range tests {
		tc := tc // capture range variable for the subtest closure
		if config.modes&tc.mode == 0 {
			continue
		}
		t.Run(tc.name, func(t *testing.T) {
			t.Helper()
			ctx, cancel := context.WithTimeout(context.Background(), config.timeout)
			defer cancel()
			ctx = debug.WithInstance(ctx, "", "")

			ws, err := fake.NewWorkspace("regtest", filedata, config.proxyTxt)
			if err != nil {
				t.Fatal(err)
			}
			// Deferring the closure of ws until the end of the entire test suite
			// has, in testing, given the LSP server time to properly shutdown and
			// release any file locks held in workspace, which is a problem on
			// Windows. This may still be flaky however, and in the future we need a
			// better solution to ensure that all Go processes started by gopls have
			// exited before we clean up.
			r.AddCloser(ws)
			ss := tc.getServer(ctx, t)
			// Wrap the server so all LSP traffic is captured for printing on
			// failure (or always, with AlwaysPrintLogs).
			ls := &loggingServer{delegate: ss}
			ts := servertest.NewPipeServer(ctx, ls)
			defer func() {
				ts.Close()
			}()
			env := NewEnv(ctx, t, ws, ts)
			defer func() {
				if t.Failed() && r.PrintGoroutinesOnFailure {
					pprof.Lookup("goroutine").WriteTo(os.Stderr, 1)
				}
				if t.Failed() || r.AlwaysPrintLogs {
					ls.printBuffers(t.Name(), os.Stderr)
				}
				// A shutdown failure here indicates a broken server, not a
				// broken test; panic so it cannot be missed.
				if err := env.E.Shutdown(ctx); err != nil {
					panic(err)
				}
			}()
			test(t, env)
		})
	}
}
|
|
|
|
// loggingServer is a jsonrpc2.StreamServer that records all LSP traffic on
// each stream it serves, so that the logs can be printed on test failure.
type loggingServer struct {
	delegate jsonrpc2.StreamServer

	// mu guards buffers.
	mu sync.Mutex
	// buffers holds one log buffer per served stream, in connection order.
	buffers []*bytes.Buffer
}

// ServeStream implements jsonrpc2.StreamServer: it wraps the stream so that
// every message is copied into a fresh log buffer before delegating.
func (s *loggingServer) ServeStream(ctx context.Context, stream jsonrpc2.Stream) error {
	s.mu.Lock()
	var buf bytes.Buffer
	s.buffers = append(s.buffers, &buf)
	s.mu.Unlock()
	logStream := protocol.LoggingStream(stream, &buf)
	return s.delegate.ServeStream(ctx, logStream)
}
|
|
|
|
func (s *loggingServer) printBuffers(testname string, w io.Writer) {
|
|
s.mu.Lock()
|
|
defer s.mu.Unlock()
|
|
|
|
for i, buf := range s.buffers {
|
|
fmt.Fprintf(os.Stderr, "#### Start Gopls Test Logs %d of %d for %q\n", i+1, len(s.buffers), testname)
|
|
io.Copy(w, buf)
|
|
fmt.Fprintf(os.Stderr, "#### End Gopls Test Logs %d of %d for %q\n", i+1, len(s.buffers), testname)
|
|
}
|
|
}
|
|
|
|
// singletonEnv returns a StreamServer backed by a fresh cache, giving each
// test an isolated in-process gopls instance.
func singletonEnv(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
	return lsprpc.NewStreamServer(cache.New(ctx, nil))
}
|
|
|
|
// forwardedEnv returns a forwarder to the runner's shared in-process TCP
// server, exercising the client/server forwarding path.
func (r *Runner) forwardedEnv(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
	ts := r.getTestServer()
	return lsprpc.NewForwarder("tcp", ts.Addr)
}
|
|
|
|
// separateProcessEnv returns a forwarder to a shared gopls daemon running in
// a separate OS process, starting the daemon on first use.
func (r *Runner) separateProcessEnv(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
	// TODO(rfindley): can we use the autostart behavior here, instead of
	// pre-starting the remote?
	socket := r.getRemoteSocket(t)
	return lsprpc.NewForwarder("unix", socket)
}
|
|
|
|
// Env holds an initialized fake Editor, Workspace, and Server, which may be
// used for writing tests. It also provides adapter methods that call t.Fatal
// on any error, so that tests for the happy path may be written without
// checking errors.
type Env struct {
	T   *testing.T
	Ctx context.Context

	// Most tests should not need to access the workspace, editor, server, or
	// connection, but they are available if needed.
	W      *fake.Workspace
	E      *fake.Editor
	Server servertest.Connector
	Conn   *jsonrpc2.Conn

	// mu guards the fields below, for the purpose of checking conditions on
	// every change to diagnostics.
	mu sync.Mutex
	// For simplicity, each waiter gets a unique ID.
	nextWaiterID int
	// state accumulates the notifications received from the server so far.
	state State
	// waiters maps waiter ID -> the condition that waiter is blocked on.
	waiters map[int]*condition
}
|
|
|
|
// State encapsulates the server notifications received by the editor so far:
// published diagnostics and log messages. Expectations are evaluated against
// this state.
type State struct {
	// diagnostics maps workspace-relative path -> the most recent
	// publishDiagnostics params received for that file.
	diagnostics map[string]*protocol.PublishDiagnosticsParams
	// logs accumulates every log message received, in arrival order.
	logs []*protocol.LogMessageParams
}
|
|
|
|
func (s State) String() string {
|
|
var b strings.Builder
|
|
b.WriteString("#### log messages (see RPC logs for full text):\n")
|
|
for _, msg := range s.logs {
|
|
summary := fmt.Sprintf("%v: %q", msg.Type, msg.Message)
|
|
if len(summary) > 60 {
|
|
summary = summary[:57] + "..."
|
|
}
|
|
// Some logs are quite long, and since they should be reproduced in the RPC
|
|
// logs on any failure we include here just a short summary.
|
|
fmt.Fprint(&b, "\t"+summary+"\n")
|
|
}
|
|
b.WriteString("\n")
|
|
b.WriteString("#### diagnostics:\n")
|
|
for name, params := range s.diagnostics {
|
|
fmt.Fprintf(&b, "\t%s (version %d):\n", name, int(params.Version))
|
|
for _, d := range params.Diagnostics {
|
|
fmt.Fprintf(&b, "\t\t(%d, %d): %s\n", int(d.Range.Start.Line), int(d.Range.Start.Character), d.Message)
|
|
}
|
|
}
|
|
return b.String()
|
|
}
|
|
|
|
// A condition is satisfied when all of its expectations are simultaneously
// met. When the condition resolves — because all expectations are met, or
// because one becomes unmeetable — its final Verdict is sent on the verdict
// channel and the condition is removed from the waiters map.
type condition struct {
	// expectations must all hold at the same time for the condition to be met.
	expectations []Expectation
	// verdict receives the condition's final verdict (Met or Unmeetable).
	verdict chan Verdict
}
|
|
|
|
// NewEnv creates a new test environment using the given workspace and gopls
// server, wiring up handlers so diagnostics and log messages from the server
// are recorded in the Env's state. It fails the test on any connection or
// initialization error.
func NewEnv(ctx context.Context, t *testing.T, ws *fake.Workspace, ts servertest.Connector) *Env {
	t.Helper()
	conn := ts.Connect(ctx)
	editor, err := fake.NewConnectedEditor(ctx, ws, conn)
	if err != nil {
		t.Fatal(err)
	}
	env := &Env{
		T:      t,
		Ctx:    ctx,
		W:      ws,
		E:      editor,
		Server: ts,
		Conn:   conn,
		state: State{
			diagnostics: make(map[string]*protocol.PublishDiagnosticsParams),
		},
		waiters: make(map[int]*condition),
	}
	// Record server notifications so that expectations can be checked
	// against them (see Await and ExpectNow).
	env.E.Client().OnDiagnostics(env.onDiagnostics)
	env.E.Client().OnLogMessage(env.onLogMessage)
	return env
}
|
|
|
|
// onDiagnostics records a publishDiagnostics notification under its
// workspace-relative path and re-evaluates all pending wait conditions.
func (e *Env) onDiagnostics(_ context.Context, d *protocol.PublishDiagnosticsParams) error {
	e.mu.Lock()
	defer e.mu.Unlock()

	pth := e.W.URIToPath(d.URI)
	// Only the latest report per file is kept; older versions are replaced.
	e.state.diagnostics[pth] = d
	e.checkConditionsLocked()
	return nil
}
|
|
|
|
// onLogMessage appends a window/logMessage notification to the recorded
// state and re-evaluates all pending wait conditions.
func (e *Env) onLogMessage(_ context.Context, m *protocol.LogMessageParams) error {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.state.logs = append(e.state.logs, m)
	e.checkConditionsLocked()
	return nil
}
|
|
|
|
// checkConditionsLocked re-evaluates every pending waiter against the current
// state, delivering a verdict and removing the waiter as soon as its result
// is decisive (Met or Unmeetable).
//
// Precondition: e.mu must be held.
func (e *Env) checkConditionsLocked() {
	for id, condition := range e.waiters {
		if v, _ := checkExpectations(e.state, condition.expectations); v != Unmet {
			delete(e.waiters, id)
			// NOTE(review): this send happens while e.mu is held. If the
			// waiting goroutine has already given up (e.g. context timeout),
			// an unbuffered verdict channel would block here forever with the
			// mutex held — confirm the waiter always receives, or that the
			// channel is buffered.
			condition.verdict <- v
		}
	}
}
|
|
|
|
// ExpectNow asserts that the current state of the editor matches the given
|
|
// expectations.
|
|
//
|
|
// It can be used together with Env.Await to allow waiting on
|
|
// simple expectations, followed by more detailed expectations tested by
|
|
// ExpectNow. For example:
|
|
//
|
|
// env.RegexpReplace("foo.go", "a", "x")
|
|
// env.Await(env.AnyDiagnosticAtCurrentVersion("foo.go"))
|
|
// env.ExpectNow(env.DiagnosticAtRegexp("foo.go", "x"))
|
|
//
|
|
// This has the advantage of not timing out if the diagnostic received for
|
|
// "foo.go" does not match the expectation: instead it fails early.
|
|
func (e *Env) ExpectNow(expectations ...Expectation) {
|
|
e.T.Helper()
|
|
e.mu.Lock()
|
|
defer e.mu.Unlock()
|
|
if verdict, summary := checkExpectations(e.state, expectations); verdict != Met {
|
|
e.T.Fatalf("expectations unmet:\n%s\ncurrent state:\n%v", summary, e.state)
|
|
}
|
|
}
|
|
|
|
// checkExpectations reports whether s meets all expectations.
|
|
func checkExpectations(s State, expectations []Expectation) (Verdict, string) {
|
|
finalVerdict := Met
|
|
var summary strings.Builder
|
|
for _, e := range expectations {
|
|
v := e.Check(s)
|
|
if v > finalVerdict {
|
|
finalVerdict = v
|
|
}
|
|
summary.WriteString(fmt.Sprintf("\t%v: %s\n", v, e.Description()))
|
|
}
|
|
return finalVerdict, summary.String()
|
|
}
|
|
|
|
// An Expectation asserts that the state of the editor at a point in time
// matches an expected condition. This is used for signaling in tests when
// certain conditions in the editor are met. Check is called with the Env's
// mutex held (see checkConditionsLocked), so implementations must not block.
type Expectation interface {
	// Check determines whether the state of the editor satisfies the
	// expectation.
	Check(State) Verdict
	// Description is a human-readable description of the expectation.
	Description() string
}
|
|
|
|
// A Verdict is the result of checking an expectation against the current
// editor state.
type Verdict int

// Order matters for the following constants: verdicts are sorted in order of
// decisiveness, so combining verdicts takes the maximum.
const (
	// Met indicates that an expectation is satisfied by the current state.
	Met Verdict = iota
	// Unmet indicates that an expectation is not currently met, but could be
	// met in the future.
	Unmet
	// Unmeetable indicates that an expectation cannot be satisfied in the
	// future.
	Unmeetable
)

// String returns a human-readable name for the verdict.
func (v Verdict) String() string {
	names := map[Verdict]string{
		Met:        "Met",
		Unmet:      "Unmet",
		Unmeetable: "Unmeetable",
	}
	if name, ok := names[v]; ok {
		return name
	}
	return fmt.Sprintf("unrecognized verdict %d", v)
}
|
|
|
|
// LogExpectation is an expectation on the log messages received by the editor
// from gopls.
type LogExpectation struct {
	// check evaluates the log messages received so far.
	check func([]*protocol.LogMessageParams) Verdict
	// description is a human-readable description of the expectation.
	description string
}

// Check implements the Expectation interface.
func (e LogExpectation) Check(s State) Verdict {
	return e.check(s.logs)
}

// Description implements the Expectation interface.
func (e LogExpectation) Description() string {
	return e.description
}
|
|
|
|
// NoErrorLogs asserts that the client has not received any log messages of
|
|
// error severity.
|
|
func NoErrorLogs() LogExpectation {
|
|
check := func(msgs []*protocol.LogMessageParams) Verdict {
|
|
for _, msg := range msgs {
|
|
if msg.Type == protocol.Error {
|
|
return Unmeetable
|
|
}
|
|
}
|
|
return Met
|
|
}
|
|
return LogExpectation{
|
|
check: check,
|
|
description: "no errors have been logged",
|
|
}
|
|
}
|
|
|
|
// LogMatching asserts that the client has received a log message
|
|
// matching of type typ matching the regexp re.
|
|
func LogMatching(typ protocol.MessageType, re string) LogExpectation {
|
|
rec, err := regexp.Compile(re)
|
|
if err != nil {
|
|
panic(err)
|
|
}
|
|
check := func(msgs []*protocol.LogMessageParams) Verdict {
|
|
for _, msg := range msgs {
|
|
if msg.Type == typ && rec.Match([]byte(msg.Message)) {
|
|
return Met
|
|
}
|
|
}
|
|
return Unmet
|
|
}
|
|
return LogExpectation{
|
|
check: check,
|
|
description: fmt.Sprintf("log message matching %q", re),
|
|
}
|
|
}
|
|
|
|
// A DiagnosticExpectation is a condition that must be met by the current set
// of diagnostics for a file.
type DiagnosticExpectation struct {
	// isMet determines whether the diagnostics for this file version satisfy our
	// expectation.
	isMet func(*protocol.PublishDiagnosticsParams) bool
	// description is a human-readable description of the diagnostic expectation.
	description string
	// path is the workspace-relative path to the file being asserted on.
	path string
}

// Check implements the Expectation interface. The expectation is Unmet until
// a diagnostics report for path exists and satisfies isMet.
func (e DiagnosticExpectation) Check(s State) Verdict {
	if diags, ok := s.diagnostics[e.path]; ok && e.isMet(diags) {
		return Met
	}
	return Unmet
}

// Description implements the Expectation interface.
func (e DiagnosticExpectation) Description() string {
	return fmt.Sprintf("%s: %s", e.path, e.description)
}
|
|
|
|
// EmptyDiagnostics asserts that diagnostics are empty for the
|
|
// workspace-relative path name.
|
|
func EmptyDiagnostics(name string) DiagnosticExpectation {
|
|
isMet := func(diags *protocol.PublishDiagnosticsParams) bool {
|
|
return len(diags.Diagnostics) == 0
|
|
}
|
|
return DiagnosticExpectation{
|
|
isMet: isMet,
|
|
description: "empty diagnostics",
|
|
path: name,
|
|
}
|
|
}
|
|
|
|
// AnyDiagnosticAtCurrentVersion asserts that there is a diagnostic report for
// the current edited version of the buffer corresponding to the given
// workspace-relative pathname.
func (e *Env) AnyDiagnosticAtCurrentVersion(name string) DiagnosticExpectation {
	// The buffer version is captured when the expectation is created, not
	// when it is checked.
	version := e.E.BufferVersion(name)
	isMet := func(diags *protocol.PublishDiagnosticsParams) bool {
		return int(diags.Version) == version
	}
	return DiagnosticExpectation{
		isMet:       isMet,
		description: fmt.Sprintf("any diagnostics at version %d", version),
		path:        name,
	}
}
|
|
|
|
// DiagnosticAtRegexp expects that there is a diagnostic entry at the start
// position matching the regexp search string re in the buffer specified by
// name. Note that this currently ignores the end position.
func (e *Env) DiagnosticAtRegexp(name, re string) DiagnosticExpectation {
	// The regexp is resolved to a concrete position when the expectation is
	// created, not each time it is checked.
	pos := e.RegexpSearch(name, re)
	expectation := DiagnosticAt(name, pos.Line, pos.Column)
	expectation.description += fmt.Sprintf(" (location of %q)", re)
	return expectation
}
|
|
|
|
// DiagnosticAt asserts that there is a diagnostic entry at the position
|
|
// specified by line and col, for the workspace-relative path name.
|
|
func DiagnosticAt(name string, line, col int) DiagnosticExpectation {
|
|
isMet := func(diags *protocol.PublishDiagnosticsParams) bool {
|
|
for _, d := range diags.Diagnostics {
|
|
if d.Range.Start.Line == float64(line) && d.Range.Start.Character == float64(col) {
|
|
return true
|
|
}
|
|
}
|
|
return false
|
|
}
|
|
return DiagnosticExpectation{
|
|
isMet: isMet,
|
|
description: fmt.Sprintf("diagnostic at {line:%d, column:%d}", line, col),
|
|
path: name,
|
|
}
|
|
}
|
|
|
|
// Await waits for all expectations to simultaneously be met. It should only be
|
|
// called from the main test goroutine.
|
|
func (e *Env) Await(expectations ...Expectation) {
|
|
e.T.Helper()
|
|
e.mu.Lock()
|
|
// Before adding the waiter, we check if the condition is currently met or
|
|
// failed to avoid a race where the condition was realized before Await was
|
|
// called.
|
|
switch verdict, summary := checkExpectations(e.state, expectations); verdict {
|
|
case Met:
|
|
return
|
|
case Unmeetable:
|
|
e.mu.Unlock()
|
|
e.T.Fatalf("unmeetable expectations:\n%s\nstate:\n%v", summary, e.state)
|
|
}
|
|
cond := &condition{
|
|
expectations: expectations,
|
|
verdict: make(chan Verdict),
|
|
}
|
|
e.waiters[e.nextWaiterID] = cond
|
|
e.nextWaiterID++
|
|
e.mu.Unlock()
|
|
|
|
var err error
|
|
select {
|
|
case <-e.Ctx.Done():
|
|
err = e.Ctx.Err()
|
|
case v := <-cond.verdict:
|
|
if v != Met {
|
|
err = fmt.Errorf("condition has final verdict %v", v)
|
|
}
|
|
}
|
|
|
|
if err != nil {
|
|
// Debugging an unmet expectation can be tricky, so we put some effort into
|
|
// nicely formatting the failure.
|
|
e.mu.Lock()
|
|
defer e.mu.Unlock()
|
|
_, summary := checkExpectations(e.state, expectations)
|
|
e.T.Fatalf("waiting on:\n%s\nerr:%v\nstate:\n%v", err, summary, e.state)
|
|
}
|
|
}
|