From 3e360b035f4c3014e9564f4994c68ccc296ef629 Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Tue, 14 Feb 2023 12:25:11 -0500
Subject: [PATCH] runtime: new API for filling PC traceback buffers

Currently, filling PC traceback buffers is one of the jobs of
gentraceback. This moves it into a new function, tracebackPCs, with a
simple API built around unwinder, and changes all callers to use this
new API.

Updates #54466.

Change-Id: Id2038bded81bf533a5a4e71178a7c014904d938c
Reviewed-on: https://go-review.googlesource.com/c/go/+/468300
Reviewed-by: Michael Pratt
TryBot-Result: Gopher Robot
Run-TryBot: Austin Clements
---
 src/runtime/export_test.go |  4 +-
 src/runtime/mprof.go       |  4 +-
 src/runtime/proc.go        | 15 +++----
 src/runtime/traceback.go   | 92 ++++++++++++++++++++++----------------
 4 files changed, 66 insertions(+), 49 deletions(-)
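Every call site below converts the same way: instead of handing
gentraceback a PC buffer, its length, and trace flags, a caller now
initializes an unwinder and passes the buffer to tracebackPCs. A minimal
sketch of the pattern (buf is a placeholder []uintptr, not code from any
one call site in this patch):

	// Old API: buffer, length, and flags all go to gentraceback.
	n := gentraceback(pc, sp, 0, gp, 0, &buf[0], len(buf), nil, nil, 0)

	// New API: initialize an unwinder at (pc, sp), then fill the buffer.
	var u unwinder
	u.initAt(pc, sp, 0, gp, unwindSilentErrors)
	n = tracebackPCs(&u, 0, buf)

Note that error behavior, previously inferred from whether pcbuf was
nil, is now an explicit choice of unwind flags (unwindSilentErrors
versus unwindPrintErrors).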
diff --git a/src/runtime/export_test.go b/src/runtime/export_test.go
index 25758972f13..31d32eabebd 100644
--- a/src/runtime/export_test.go
+++ b/src/runtime/export_test.go
@@ -483,7 +483,9 @@ func LockOSCounts() (external, internal uint32) {
 func TracebackSystemstack(stk []uintptr, i int) int {
 	if i == 0 {
 		pc, sp := getcallerpc(), getcallersp()
-		return gentraceback(pc, sp, 0, getg(), 0, &stk[0], len(stk), nil, nil, _TraceJumpStack)
+		var u unwinder
+		u.initAt(pc, sp, 0, getg(), unwindJumpStack) // Don't ignore errors, for testing
+		return tracebackPCs(&u, 0, stk)
 	}
 	n := 0
 	systemstack(func() {
diff --git a/src/runtime/mprof.go b/src/runtime/mprof.go
index cf9e0329816..dfaa369740b 100644
--- a/src/runtime/mprof.go
+++ b/src/runtime/mprof.go
@@ -1173,7 +1173,9 @@ func GoroutineProfile(p []StackRecord) (n int, ok bool) {
 }
 
 func saveg(pc, sp uintptr, gp *g, r *StackRecord) {
-	n := gentraceback(pc, sp, 0, gp, 0, &r.Stack0[0], len(r.Stack0), nil, nil, 0)
+	var u unwinder
+	u.initAt(pc, sp, 0, gp, unwindSilentErrors)
+	n := tracebackPCs(&u, 0, r.Stack0[:])
 	if n < len(r.Stack0) {
 		r.Stack0[n] = 0
 	}
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index ee13debf545..533cf208380 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -4687,6 +4687,7 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 	// See golang.org/issue/17165.
 	getg().m.mallocing++
 
+	var u unwinder
 	var stk [maxCPUProfStack]uintptr
 	n := 0
 	if mp.ncgo > 0 && mp.curg != nil && mp.curg.syscallpc != 0 && mp.curg.syscallsp != 0 {
@@ -4700,26 +4701,24 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 			for cgoOff < len(mp.cgoCallers) && mp.cgoCallers[cgoOff] != 0 {
 				cgoOff++
 			}
-			copy(stk[:], mp.cgoCallers[:cgoOff])
+			n += copy(stk[:], mp.cgoCallers[:cgoOff])
 			mp.cgoCallers[0] = 0
 		}
 
 		// Collect Go stack that leads to the cgo call.
-		n = gentraceback(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, 0, &stk[cgoOff], len(stk)-cgoOff, nil, nil, 0)
-		if n > 0 {
-			n += cgoOff
-		}
+		u.initAt(mp.curg.syscallpc, mp.curg.syscallsp, 0, mp.curg, unwindSilentErrors)
 	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
 		// Libcall, i.e. runtime syscall on windows.
 		// Collect Go stack that leads to the call.
-		n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[n], len(stk[n:]), nil, nil, 0)
+		u.initAt(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), unwindSilentErrors)
 	} else if mp != nil && mp.vdsoSP != 0 {
 		// VDSO call, e.g. nanotime1 on Linux.
 		// Collect Go stack that leads to the call.
-		n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[n], len(stk[n:]), nil, nil, _TraceJumpStack)
+		u.initAt(mp.vdsoPC, mp.vdsoSP, 0, gp, unwindSilentErrors|unwindJumpStack)
 	} else {
-		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
+		u.initAt(pc, sp, lr, gp, unwindSilentErrors|unwindTrap|unwindJumpStack)
 	}
+	n += tracebackPCs(&u, 0, stk[n:])
 
 	if n <= 0 {
 		// Normal traceback is impossible or has failed.
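A note on the sigprof conversion above: previously each branch computed
n on its own, with fix-ups like n += cgoOff; now any cgo frames are
copied to the front of stk, n advances past them, and a single
tracebackPCs call after the if/else chain appends the Go frames behind
them. Roughly, for the cgo-call case (cgoPCs, syscallPC, syscallSP, and
curg are placeholder names, not the patch's variables):

	var stk [64]uintptr               // stand-in for [maxCPUProfStack]uintptr
	n := copy(stk[:], cgoPCs)         // C frames go in front
	var u unwinder
	u.initAt(syscallPC, syscallSP, 0, curg, unwindSilentErrors)
	n += tracebackPCs(&u, 0, stk[n:]) // Go frames appended after them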
diff --git a/src/runtime/traceback.go b/src/runtime/traceback.go
index 968823316e4..b100a3c3b21 100644
--- a/src/runtime/traceback.go
+++ b/src/runtime/traceback.go
@@ -591,27 +591,64 @@ func (u *unwinder) cgoCallers(pcBuf []uintptr) int {
 	return len(pcBuf)
 }
 
-// Generic traceback. Handles runtime stack prints (pcbuf == nil),
-// and the runtime.Callers function (pcbuf != nil).
-// A little clunky to merge these, but avoids
-// duplicating the code and all its subtlety.
+// tracebackPCs populates pcBuf with the return addresses for each frame from u
+// and returns the number of PCs written to pcBuf. The returned PCs correspond
+// to "logical frames" rather than "physical frames"; that is, if A is inlined
+// into B, this will still return PCs for both A and B. This also includes PCs
+// generated by the cgo unwinder, if one is registered.
+//
+// If skip != 0, this skips this many logical frames.
+//
+// Callers should set the unwindSilentErrors flag on u.
+func tracebackPCs(u *unwinder, skip int, pcBuf []uintptr) int {
+	var cgoBuf [32]uintptr
+	n := 0
+	for ; n < len(pcBuf) && u.valid(); u.next() {
+		f := u.frame.fn
+		cgoN := u.cgoCallers(cgoBuf[:])
+
+		// TODO: Why does &u.cache cause u to escape?
+		for iu, uf := newInlineUnwinder(f, u.symPC(), noEscapePtr(&u.cache)); n < len(pcBuf) && uf.valid(); uf = iu.next(uf) {
+			sf := iu.srcFunc(uf)
+			if sf.funcID == funcID_wrapper && elideWrapperCalling(u.calleeFuncID) {
+				// ignore wrappers
+			} else if skip > 0 {
+				skip--
+			} else {
+				// Callers expect the pc buffer to contain return addresses
+				// and do the -1 themselves, so we add 1 to the call PC to
+				// create a return PC.
+				pcBuf[n] = uf.pc + 1
+				n++
+			}
+			u.calleeFuncID = sf.funcID
+		}
+		// Add cgo frames (if we're done skipping over the requested number of
+		// Go frames).
+		if skip == 0 {
+			n += copy(pcBuf[n:], cgoBuf[:cgoN])
+		}
+	}
+	return n
+}
+
+// Generic traceback. Handles runtime stack prints (pcbuf == nil).
 //
 // The skip argument is only valid with pcbuf != nil and counts the number
 // of logical frames to skip rather than physical frames (with inlining, a
 // PC in pcbuf can represent multiple calls).
 func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max int, callback func(*stkframe, unsafe.Pointer) bool, v unsafe.Pointer, flags uint) int {
+	if pcbuf != nil {
+		throw("pcbuf argument no longer supported")
+	}
 	if callback != nil {
 		throw("callback argument no longer supported")
 	}
 
 	// Translate flags
 	var uflags unwindFlags
-	printing := pcbuf == nil && callback == nil
-	if printing {
-		uflags |= unwindPrintErrors
-	} else {
-		uflags |= unwindSilentErrors
-	}
+	printing := true
+	uflags |= unwindPrintErrors
 	if flags&_TraceTrap != 0 {
 		uflags |= unwindTrap
 	}
@@ -634,33 +671,6 @@ func gentraceback(pc0, sp0, lr0 uintptr, gp *g, skip int, pcbuf *uintptr, max in
 
 		cgoN := u.cgoCallers(cgoBuf[:])
 
-		if pcbuf != nil {
-			// TODO: Why does cache escape? (Same below)
-			for iu, uf := newInlineUnwinder(f, u.symPC(), noEscapePtr(&u.cache)); uf.valid(); uf = iu.next(uf) {
-				sf := iu.srcFunc(uf)
-				if sf.funcID == funcID_wrapper && elideWrapperCalling(u.calleeFuncID) {
-					// ignore wrappers
-				} else if skip > 0 {
-					skip--
-				} else if n < max {
-					// Callers expect the pc buffer to contain return addresses
-					// and do the -1 themselves, so we add 1 to the call PC to
-					// create a return PC.
-					(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = uf.pc + 1
-					n++
-				}
-				u.calleeFuncID = sf.funcID
-			}
-			// Add cgo frames
-			if skip == 0 { // skip only applies to Go frames
-				for i := 0; i < cgoN && n < max; i++ {
-					(*[1 << 20]uintptr)(unsafe.Pointer(pcbuf))[n] = cgoBuf[i]
-					n++
-				}
-			}
-			n-- // offset n++ below
-		}
-
 		if printing {
 			// assume skip=0 for printing.
 			//
@@ -981,13 +991,17 @@ func callers(skip int, pcbuf []uintptr) int {
 	gp := getg()
 	var n int
 	systemstack(func() {
-		n = gentraceback(pc, sp, 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
+		var u unwinder
+		u.initAt(pc, sp, 0, gp, unwindSilentErrors)
+		n = tracebackPCs(&u, skip, pcbuf)
 	})
 	return n
 }
 
 func gcallers(gp *g, skip int, pcbuf []uintptr) int {
-	return gentraceback(^uintptr(0), ^uintptr(0), 0, gp, skip, &pcbuf[0], len(pcbuf), nil, nil, 0)
+	var u unwinder
+	u.init(gp, unwindSilentErrors)
+	return tracebackPCs(&u, skip, pcbuf)
 }
 
 // showframe reports whether the frame with the given characteristics should
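One subtlety tracebackPCs carries over from gentraceback: pcBuf holds
return addresses, so each logical call PC is stored as uf.pc + 1 and
consumers are expected to subtract 1 before symbolizing. Since
runtime.Callers now funnels through tracebackPCs (via callers above),
the usual consumer pattern is unaffected; for example:

	package main

	import "runtime"

	func main() {
		pcs := make([]uintptr, 16)
		n := runtime.Callers(1, pcs)
		// CallersFrames applies the -1 internally, mapping each
		// return address back to its call site, including inlined
		// (logical) frames.
		frames := runtime.CallersFrames(pcs[:n])
		for {
			frame, more := frames.Next()
			println(frame.Function)
			if !more {
				break
			}
		}
	}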