
internal/lsp/lsprpc: improvements to daemon logging

The gopls daemon had different default logging behavior than the sidecar
gopls: by default, the daemon was started with -logfile=auto.
Additionally, because most logs are reflected back to the forwarder, the
actual daemon logs have very little (if any) information.

This means that if you simply start gopls with -remote=auto, you'll get
a single logfile named /tmp/gopls-<pid>.log, which is mostly empty. This
is not a delightful experience.

Fix this via several improvements:
 + Log lifecycle events in the Daemon, to give the log a purpose.
 + Give the daemon a new default log location:
   /tmp/gopls-daemon-<pid>.log.
 + Don't pass -logfile=auto to the daemon by default.
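
To make the new defaults concrete, here is a minimal sketch (not the gopls
implementation itself; defaultLogfile is a hypothetical helper) of how an
-logfile=auto path is now chosen depending on whether the process is running
as a daemon:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    // defaultLogfile picks the default log path for "auto" logging:
    // daemon processes log to gopls-daemon-<pid>.log, sidecar processes
    // to gopls-<pid>.log, both under the OS temp directory.
    func defaultLogfile(isDaemon bool) string {
        name := "gopls-%d.log"
        if isDaemon {
            name = "gopls-daemon-%d.log"
        }
        return filepath.Join(os.TempDir(), fmt.Sprintf(name, os.Getpid()))
    }

    func main() {
        fmt.Println(defaultLogfile(true))  // e.g. /tmp/gopls-daemon-12345.log
        fmt.Println(defaultLogfile(false)) // e.g. /tmp/gopls-12345.log
    }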

Fixes golang/go#40105

Change-Id: I5e91ea98b4968c512bce76a596bbae441f461a66
Reviewed-on: https://go-review.googlesource.com/c/tools/+/241440
Run-TryBot: Robert Findley <rfindley@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rebecca Stambler <rstambler@golang.org>
Rob Findley 2020-07-08 13:49:07 -04:00 committed by Robert Findley
parent cf799caead
commit f2c07d7d8e
8 changed files with 51 additions and 25 deletions


@@ -116,11 +116,15 @@ servers and clients.
 ### Using logfiles
 
-The gopls daemon is started with `-logfile=auto` by default. To customize this,
-pass `-remote.logfile` to the gopls forwarder.
+The gopls daemon is started with logging disabled by default. To customize
+this, pass `-remote.logfile` to the gopls forwarder. Using
+`-remote.logfile=auto`, the daemon will log to a default location (on posix
+systems: `/tmp/gopls-daemon-<pid>.log`).
 
-By default, the gopls daemon is not started with the `-rpc.trace` flag, so its
-logfile will only contain actual debug logs from the gopls process.
+The gopls daemon does not log session-scoped messages: those are instead
+reflected back to the forwarder so that they can be accessed by the editor.
+Daemon logs will only contain global messages, for example logs when sessions
+connect and disconnect.
 
 It is recommended to start the forwarder gopls process with `-rpc.trace`, so
 that its logfile will contain rpc trace logs specific to the LSP session.
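
As an illustration of the new forwarder behavior, here is a simplified sketch
(buildDaemonArgs and the example socket path are assumptions for illustration,
not gopls APIs) of assembling the daemon command line so that -logfile is only
passed when a remote logfile has actually been requested:

    package main

    import (
        "fmt"
        "time"
    )

    // buildDaemonArgs sketches how a forwarder could assemble the daemon
    // invocation: -logfile is only appended when a remote logfile was
    // explicitly configured (for example via -remote.logfile=auto).
    func buildDaemonArgs(network, address, remoteLogfile string, listenTimeout time.Duration) []string {
        args := []string{"serve",
            "-listen", fmt.Sprintf("%s;%s", network, address),
            "-listen.timeout", listenTimeout.String(),
        }
        if remoteLogfile != "" {
            args = append(args, "-logfile", remoteLogfile)
        }
        return args
    }

    func main() {
        // Without -remote.logfile, the daemon starts with logging disabled.
        fmt.Println(buildDaemonArgs("unix", "/tmp/gopls-example.sock", "", time.Minute))
        // With -remote.logfile=auto, it logs to the default daemon location.
        fmt.Println(buildDaemonArgs("unix", "/tmp/gopls-example.sock", "auto", time.Minute))
    }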


@@ -92,7 +92,6 @@ func New(name, wd string, env []string, options func(*source.Options)) *Applicat
 		Serve: Serve{
 			RemoteListenTimeout: 1 * time.Minute,
-			RemoteLogfile:       "auto",
 		},
 	}
 	return app
 }


@@ -8,6 +8,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"log"
 	"os"
 	"strings"
 	"time"
@@ -62,8 +63,9 @@ func (s *Serve) Run(ctx context.Context, args ...string) error {
 	}
 	di := debug.GetInstance(ctx)
+	isDaemon := s.Address != "" || s.Port != 0
 	if di != nil {
-		closeLog, err := di.SetLogFile(s.Logfile)
+		closeLog, err := di.SetLogFile(s.Logfile, isDaemon)
 		if err != nil {
 			return err
 		}
@@ -82,16 +84,21 @@ func (s *Serve) Run(ctx context.Context, args ...string) error {
 			lsprpc.RemoteLogfile(s.RemoteLogfile),
 		)
 	} else {
-		ss = lsprpc.NewStreamServer(cache.New(ctx, s.app.options))
+		ss = lsprpc.NewStreamServer(cache.New(ctx, s.app.options), isDaemon)
 	}
+	var network, addr string
 	if s.Address != "" {
-		network, addr := parseAddr(s.Address)
-		return jsonrpc2.ListenAndServe(ctx, network, addr, ss, s.IdleTimeout)
+		network, addr = parseAddr(s.Address)
 	}
 	if s.Port != 0 {
-		addr := fmt.Sprintf(":%v", s.Port)
-		return jsonrpc2.ListenAndServe(ctx, "tcp", addr, ss, s.IdleTimeout)
+		network = "tcp"
+		addr = fmt.Sprintf(":%v", s.Port)
+	}
+	if addr != "" {
+		log.Printf("Gopls daemon: listening on %s network, address %s...", network, addr)
+		defer log.Printf("Gopls daemon: exiting")
+		return jsonrpc2.ListenAndServe(ctx, network, addr, ss, s.IdleTimeout)
 	}
 	stream := jsonrpc2.NewHeaderStream(fakenet.NewConn("stdio", os.Stdin, os.Stdout))
 	if s.Trace && di != nil {


@@ -68,7 +68,7 @@ func TestCommandLine(testdata string, options func(*source.Options)) func(*testi
 func NewTestServer(ctx context.Context, options func(*source.Options)) *servertest.TCPServer {
 	ctx = debug.WithInstance(ctx, "", "")
 	cache := cache.New(ctx, options)
-	ss := lsprpc.NewStreamServer(cache)
+	ss := lsprpc.NewStreamServer(cache, false)
 	return servertest.NewTCPServer(ctx, ss, nil)
 }


@@ -323,14 +323,18 @@ func WithInstance(ctx context.Context, workdir, agent string) context.Context {
 }
 
 // SetLogFile sets the logfile for use with this instance.
-func (i *Instance) SetLogFile(logfile string) (func(), error) {
+func (i *Instance) SetLogFile(logfile string, isDaemon bool) (func(), error) {
 	// TODO: probably a better solution for deferring closure to the caller would
 	// be for the debug instance to itself be closed, but this fixes the
 	// immediate bug of logs not being captured.
 	closeLog := func() {}
 	if logfile != "" {
 		if logfile == "auto" {
-			logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.log", os.Getpid()))
+			if isDaemon {
+				logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-daemon-%d.log", os.Getpid()))
+			} else {
+				logfile = filepath.Join(os.TempDir(), fmt.Sprintf("gopls-%d.log", os.Getpid()))
+			}
 		}
 		f, err := os.Create(logfile)
 		if err != nil {


@@ -38,6 +38,8 @@ var serverIndex int64
 // streams as a new LSP session, using a shared cache.
 type StreamServer struct {
 	cache *cache.Cache
+	// logConnections controls whether or not to log new connections.
+	logConnections bool
 
 	// serverForTest may be set to a test fake for testing.
 	serverForTest protocol.Server
@@ -46,8 +48,8 @@ type StreamServer struct {
 // NewStreamServer creates a StreamServer using the shared cache. If
 // withTelemetry is true, each session is instrumented with telemetry that
 // records RPC statistics.
-func NewStreamServer(cache *cache.Cache) *StreamServer {
-	return &StreamServer{cache: cache}
+func NewStreamServer(cache *cache.Cache, logConnections bool) *StreamServer {
+	return &StreamServer{cache: cache, logConnections: logConnections}
 }
 
 // ServeStream implements the jsonrpc2.StreamServer interface, by handling
@@ -78,6 +80,10 @@ func (s *StreamServer) ServeStream(ctx context.Context, conn jsonrpc2.Conn) erro
 		handshaker(session, executable,
 			protocol.ServerHandler(server,
 				jsonrpc2.MethodNotFound))))
+	if s.logConnections {
+		log.Printf("Session %s: connected", session.ID())
+		defer log.Printf("Session %s: exited", session.ID())
+	}
 	<-conn.Done()
 	return conn.Err()
 }
@@ -146,7 +152,6 @@ func NewForwarder(network, addr string, opts ...ForwarderOption) *Forwarder {
 		goplsPath:           gp,
 		dialTimeout:         1 * time.Second,
 		retries:             5,
-		remoteLogfile:       "auto",
 		remoteListenTimeout: 1 * time.Minute,
 	}
 	for _, opt := range opts {
@@ -295,7 +300,9 @@ func (f *Forwarder) connectToRemote(ctx context.Context) (net.Conn, error) {
 	args := []string{"serve",
 		"-listen", fmt.Sprintf(`%s;%s`, network, address),
 		"-listen.timeout", f.remoteListenTimeout.String(),
-		"-logfile", f.remoteLogfile,
 	}
+	if f.remoteLogfile != "" {
+		args = append(args, "-logfile", f.remoteLogfile)
+	}
 	if f.remoteDebug != "" {
 		args = append(args, "-debug", f.remoteDebug)
@@ -466,11 +473,16 @@ func handshaker(session *cache.Session, goplsPath string, handler jsonrpc2.Handl
 	return func(ctx context.Context, reply jsonrpc2.Replier, r jsonrpc2.Request) error {
 		switch r.Method() {
 		case handshakeMethod:
+			// We log.Printf in this handler, rather than event.Log when we want logs
+			// to go to the daemon log rather than being reflected back to the
+			// client.
 			var req handshakeRequest
 			if err := json.Unmarshal(r.Params(), &req); err != nil {
+				log.Printf("Error processing handshake for session %s: %v", session.ID(), err)
 				sendError(ctx, reply, err)
 				return nil
 			}
+			log.Printf("Session %s: got handshake. Logfile: %q, Debug addr: %q", session.ID(), req.Logfile, req.DebugAddr)
 			event.Log(ctx, "Handshake session update",
 				cache.KeyUpdateSession.Of(session),
 				tag.DebugAddress.Of(req.DebugAddr),


@@ -56,7 +56,7 @@ func TestClientLogging(t *testing.T) {
 	client := fakeClient{logs: make(chan string, 10)}
 
 	ctx = debug.WithInstance(ctx, "", "")
-	ss := NewStreamServer(cache.New(ctx, nil))
+	ss := NewStreamServer(cache.New(ctx, nil), false)
 	ss.serverForTest = server
 	ts := servertest.NewPipeServer(ctx, ss, nil)
 	defer checkClose(t, ts.Close)
@@ -113,7 +113,7 @@ func checkClose(t *testing.T, closer func() error) {
 func setupForwarding(ctx context.Context, t *testing.T, s protocol.Server) (direct, forwarded servertest.Connector, cleanup func()) {
 	t.Helper()
 	serveCtx := debug.WithInstance(ctx, "", "")
-	ss := NewStreamServer(cache.New(serveCtx, nil))
+	ss := NewStreamServer(cache.New(serveCtx, nil), false)
 	ss.serverForTest = s
 	tsDirect := servertest.NewTCPServer(serveCtx, ss, nil)
 
@@ -217,7 +217,7 @@ func TestDebugInfoLifecycle(t *testing.T) {
 	serverCtx := debug.WithInstance(baseCtx, "", "")
 
 	cache := cache.New(serverCtx, nil)
-	ss := NewStreamServer(cache)
+	ss := NewStreamServer(cache, false)
 	tsBackend := servertest.NewTCPServer(serverCtx, ss, nil)
 
 	forwarder := NewForwarder("tcp", tsBackend.Addr)


@@ -237,7 +237,7 @@ func (s *loggingFramer) printBuffers(testname string, w io.Writer) {
 }
 
 func singletonServer(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
-	return lsprpc.NewStreamServer(cache.New(ctx, nil))
+	return lsprpc.NewStreamServer(cache.New(ctx, nil), false)
 }
 
 func (r *Runner) forwardedServer(ctx context.Context, t *testing.T) jsonrpc2.StreamServer {
@@ -245,15 +245,15 @@ func (r *Runner) forwardedServer(ctx context.Context, t *testing.T) jsonrpc2.Str
 	return lsprpc.NewForwarder("tcp", ts.Addr)
 }
 
-// getTestServer gets the test server instance to connect to, or creates one if
-// it doesn't exist.
+// getTestServer gets the shared test server instance to connect to, or creates
+// one if it doesn't exist.
 func (r *Runner) getTestServer() *servertest.TCPServer {
 	r.mu.Lock()
 	defer r.mu.Unlock()
 	if r.ts == nil {
 		ctx := context.Background()
 		ctx = debug.WithInstance(ctx, "", "")
-		ss := lsprpc.NewStreamServer(cache.New(ctx, nil))
+		ss := lsprpc.NewStreamServer(cache.New(ctx, nil), false)
 		r.ts = servertest.NewTCPServer(ctx, ss, nil)
 	}
 	return r.ts