
runtime: prioritize VDSO and libcall unwinding in profiler

In the profiler, when unwinding the stack, we have special
handling for VDSO calls. Currently, the special handling is only
used when the normal unwinding fails. If the signal lands in the
function that makes the VDSO call (e.g. nanotime1), at a point
after the stack switch, the normal unwinding doesn't fail but
produces a stack trace with exactly one frame (the nanotime1
frame), because the trace stops at the stack switch. This
one-frame stack trace is not very helpful. Instead, if vdsoSP is
set, we know we are in a VDSO call, or right before or after it,
so use vdsoPC and vdsoSP for unwinding. Do the same for libcall.
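
(For context, a toy sketch of the bookkeeping this relies on. In the real
runtime, the vdsoPC/vdsoSP fields of m are saved and cleared by the per-arch
assembly wrappers around the VDSO call, e.g. in sys_linux_amd64.s; the Go code
and names below are illustrative only, not the runtime's implementation.)

package main

import "fmt"

// m models only the two fields that matter here.
type m struct {
	vdsoPC, vdsoSP uintptr // Go caller's PC/SP, saved around a VDSO call
}

// callVDSO brackets a (fake) VDSO call: while vdsoSP is non-zero, a SIGPROF
// handler has a clean Go unwinding anchor even though the thread may have
// switched stacks for the call.
func callVDSO(mp *m, callerPC, callerSP uintptr, fn func() int64) int64 {
	mp.vdsoPC, mp.vdsoSP = callerPC, callerSP
	defer func() { mp.vdsoSP = 0 }() // leaving the VDSO window
	return fn()
}

// profileAnchor mirrors the priority this change introduces: inside the VDSO
// window, unwind from the saved call site rather than from the signal PC/SP.
func profileAnchor(mp *m, sigPC, sigSP uintptr) (uintptr, uintptr) {
	if mp.vdsoSP != 0 {
		return mp.vdsoPC, mp.vdsoSP
	}
	return sigPC, sigSP
}

func main() {
	mp := &m{}
	_ = callVDSO(mp, 0x40a000, 0xbeef00, func() int64 { return 42 })
	pc, sp := profileAnchor(mp, 0x7fff00, 0x7ffe00)
	fmt.Printf("pc=%#x sp=%#x\n", pc, sp) // window closed, so the signal PC/SP win
}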

Also remove _TraceTrap for VDSO unwinding, as vdsoPC and vdsoSP
correspond to a call, not an interrupted instruction.
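
(The _TraceTrap distinction, in toy form: a traceback treats an ordinary PC as
a return address, which points just past a CALL and must be backed up to
attribute the sample to the call site, whereas a trap/signal PC already points
at the interrupted instruction. A hypothetical sketch, not the runtime's
gentraceback API:)

package main

import "fmt"

// attributePC illustrates why the vdsoPC anchor must not carry _TraceTrap:
// vdsoPC is a return address (it corresponds to a call), so it should be
// backed up into the CALL instruction; only a genuine trap PC is used as-is.
func attributePC(pc uintptr, fromTrap bool) uintptr {
	if fromTrap {
		return pc // trap: PC is the interrupted instruction itself
	}
	return pc - 1 // return address: step back into the CALL
}

func main() {
	const retAddr uintptr = 0x4021b0 // hypothetical return address
	fmt.Printf("as call: %#x, as trap: %#x\n",
		attributePC(retAddr, false), attributePC(retAddr, true))
}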

Fixes #56574.

Change-Id: I799aa7644d0c1e2715ab038a9eef49481dd3a7f5
Reviewed-on: https://go-review.googlesource.com/c/go/+/455166
Run-TryBot: Cherry Mui <cherryyz@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
Author: Cherry Mui
Date: 2022-12-05 13:02:22 -05:00
Commit: 185e1a7b27
Parent: a5b10be471

src/runtime/proc.go

@@ -4719,37 +4719,33 @@ func sigprof(pc, sp, lr uintptr, gp *g, mp *m) {
 		if n > 0 {
 			n += cgoOff
 		}
+	} else if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
+		// Libcall, i.e. runtime syscall on windows.
+		// Collect Go stack that leads to the call.
+		n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[n], len(stk[n:]), nil, nil, 0)
+	} else if mp != nil && mp.vdsoSP != 0 {
+		// VDSO call, e.g. nanotime1 on Linux.
+		// Collect Go stack that leads to the call.
+		n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[n], len(stk[n:]), nil, nil, _TraceJumpStack)
 	} else {
 		n = gentraceback(pc, sp, lr, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
 	}
 
 	if n <= 0 {
 		// Normal traceback is impossible or has failed.
-		// See if it falls into several common cases.
-		n = 0
-		if usesLibcall() && mp.libcallg != 0 && mp.libcallpc != 0 && mp.libcallsp != 0 {
-			// Libcall, i.e. runtime syscall on windows.
-			// Collect Go stack that leads to the call.
-			n = gentraceback(mp.libcallpc, mp.libcallsp, 0, mp.libcallg.ptr(), 0, &stk[0], len(stk), nil, nil, 0)
+		// Account it against abstract "System" or "GC".
+		n = 2
+		if inVDSOPage(pc) {
+			pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
+		} else if pc > firstmoduledata.etext {
+			// "ExternalCode" is better than "etext".
+			pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
 		}
-		if n == 0 && mp != nil && mp.vdsoSP != 0 {
-			n = gentraceback(mp.vdsoPC, mp.vdsoSP, 0, gp, 0, &stk[0], len(stk), nil, nil, _TraceTrap|_TraceJumpStack)
-		}
-		if n == 0 {
-			// If all of the above has failed, account it against abstract "System" or "GC".
-			n = 2
-			if inVDSOPage(pc) {
-				pc = abi.FuncPCABIInternal(_VDSO) + sys.PCQuantum
-			} else if pc > firstmoduledata.etext {
-				// "ExternalCode" is better than "etext".
-				pc = abi.FuncPCABIInternal(_ExternalCode) + sys.PCQuantum
-			}
-			stk[0] = pc
-			if mp.preemptoff != "" {
-				stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
-			} else {
-				stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
-			}
+		stk[0] = pc
+		if mp.preemptoff != "" {
+			stk[1] = abi.FuncPCABIInternal(_GC) + sys.PCQuantum
+		} else {
+			stk[1] = abi.FuncPCABIInternal(_System) + sys.PCQuantum
 		}
 	}
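
A small repro sketch in the spirit of issue #56574 (not a test from this CL):
under CPU profiling, a tight time.Now loop spends much of its time in the
VDSO-backed clock call, and before this change such samples could show up as a
truncated, single-frame stack; with it, they unwind through the Go callers.
Inspect the result with `go tool pprof cpu.pprof`.

package main

import (
	"log"
	"os"
	"runtime/pprof"
	"time"
)

func main() {
	f, err := os.Create("cpu.pprof")
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal(err)
	}
	defer pprof.StopCPUProfile()

	// Hammer the VDSO-backed clock for about a second so that many SIGPROF
	// samples land inside or around the VDSO call.
	deadline := time.Now().Add(time.Second)
	for time.Now().Before(deadline) {
	}
}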