1
0
mirror of https://github.com/golang/go synced 2024-11-18 04:34:52 -07:00

runtime: prioritize VDSO and libcall unwinding in profiler

In the profiler, when unwinding the stack, we have special
handling for VDSO calls. Currently, the special handling is only
used when the normal unwinding fails. If the signal lands in the
function that makes the VDSO call (e.g. nanotime1) and after the
stack switch, the normal unwinding doesn't fail but gets a stack
trace with exactly one frame (the nanotime1 frame). The stack
trace stops because of the stack switch. This 1-frame stack trace
is not as helpful. Instead, if vdsoSP is set, we know we are in
a VDSO call or right before or after it, so use vdsoPC and vdsoSP
for unwinding. Do the same for libcall.

Also remove _TraceTrap for VDSO unwinding, as vdsoPC and vdsoSP
correspond to a call, not an interrupted instruction.

Fixes #56574.

Change-Id: I799aa7644d0c1e2715ab038a9eef49481dd3a7f5
Reviewed-on: https://go-review.googlesource.com/c/go/+/455166
Run-TryBot: Cherry Mui <cherryyz@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
This commit is contained in:
Cherry Mui 2022-12-05 13:02:22 -05:00
parent a5b10be471
commit 185e1a7b27

View File

@@ -4719,37 +4719,33 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 		if n > 0 {
 			n += cgoOff
 		}
+	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
+		// Libcall, i.e. runtime syscall on windows.
+		// Collect Go stack that leads to the call.
+		n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[n], len(stk[n:]), nil, nil, 0)
+	} else if mp != nil && mp.vdsoSP != 0 {
+		// VDSO call, e.g. nanotime1 on Linux.
+		// Collect Go stack that leads to the call.
+		n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[n], len(stk[n:]), nil, nil, _TraceJumpStack)
 	} else {
 		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
 	}
 	if n <= 0 {
 		// Normal traceback is impossible or has failed.
-		// See if it falls into several common cases.
-		n = 0
-		if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
-			// Libcall, i.e. runtime syscall on windows.
-			// Collect Go stack that leads to the call.
-			n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
-		}
-		if n == 0 && mp != nil && mp.vdsoSP != 0 {
-			n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
-		}
-		if n == 0 {
-			// If all of the above has failed, account it against abstract "System" or "GC".
-			n = 2
-			if inVDSOPage(pc) {
-				pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
-			} else if pc > firstmoduledata.etext {
-				// "ExternalCode" is better than "etext".
-				pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
-			}
-			stk[0] = pc
-			if mp.preemptoff != "" {
-				stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
-			} else {
-				stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
-			}
+		// Account it against abstract "System" or "GC".
+		n = 2
+		if inVDSOPage(pc) {
+			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
+		} else if pc > firstmoduledata.etext {
+			// "ExternalCode" is better than "etext".
+			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
+		}
+		stk[0] = pc
+		if mp.preemptoff != "" {
+			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
+		} else {
+			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
 		}
 	}