runtime: traceback from outermost libc call

If we're in a libc call and get a trap, don't try to traceback the
libc call. Start from the state we had at entry to libc. If there are
multiple libc calls outstanding, remember the outermost one.

Fixes #26393

Change-Id: Icfe8794b95bf3bfd1a0679b456dcde2481dcabf3
Reviewed-on: https://go-review.googlesource.com/124195
Reviewed-by: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
commit fe68ab3bcd
parent 5fc70b6fac
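The change applies the same rule on every platform that calls libc directly: the per-M libcall g/pc/sp record is written only by the outermost libc call on that M, and only that call clears it again, so a trap or profile signal that arrives while C code is running can start its traceback from the Go frame that entered libc. The hunks below make that concrete for the Solaris sysvicall wrappers, the Windows stdcall wrapper, the Darwin libcCall, and tracebacktrap. As a minimal, self-contained sketch of the bookkeeping (fakeM and libcWrapper are illustrative stand-ins, not runtime code):

```go
// A minimal sketch of the "remember only the outermost libc call" pattern.
// fakeM and libcWrapper are illustrative stand-ins, not the runtime's types.
package main

import "fmt"

// fakeM stands in for the runtime's m. libcallpc/libcallsp hold the Go
// caller's state for a profiler or trap handler to traceback from.
type fakeM struct {
	libcallpc uintptr
	libcallsp uintptr
}

// libcWrapper records pc/sp only when no libc call is already outstanding
// on this M, and clears them only if it was the call that recorded them.
func libcWrapper(mp *fakeM, pc, sp uintptr, call func()) {
	recorded := false
	if mp.libcallsp == 0 { // outermost libc call on this M
		mp.libcallpc = pc
		mp.libcallsp = sp // sp last: a non-zero sp implies pc is valid too
		recorded = true
	}
	call() // stands in for the actual call into libc
	if recorded {
		mp.libcallsp = 0 // only the outermost call resets the record
	}
}

func main() {
	m := &fakeM{}
	libcWrapper(m, 0x100, 0x200, func() {
		// A nested call (say, from a signal handler) must neither
		// overwrite nor clear the outermost call's record.
		libcWrapper(m, 0x300, 0x400, func() {})
		fmt.Printf("after nested call: pc=%#x sp=%#x\n", m.libcallpc, m.libcallsp)
	})
	fmt.Printf("after outermost call: sp=%#x\n", m.libcallsp)
}
```

Running it shows the nested call leaving pc/sp untouched and the record being cleared only once the outermost call returns.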
@@ -37,12 +37,14 @@ func sysvicall0(fn *libcFunc) uintptr {
 	if gp != nil {
 		mp = gp.m
 	}
-	if mp != nil {
+	if mp != nil && mp.libcallsp == 0 {
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
-	}
+	} else {
+		mp = nil // See comment in sys_darwin.go:libcCall
+	}

 	var libcall libcall
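This first hunk guards the sysvicall0 recording with mp.libcallsp == 0 and otherwise nils out mp (the trick explained by the sys_darwin.go comment further down), so an outer record is neither overwritten nor cleared; the six hunks that follow apply the identical change to sysvicall1 through sysvicall6. The existing "sp must be the last" comment is what makes the record safe to read asynchronously: the profile signal treats a non-zero libcallsp as the indication that g and pc are already valid. A hedged sketch of that reader-side convention (savedState and sampleForProfile are illustrative names, not the runtime's):

```go
// Sketch of the reader side of the libcall record: trust g/pc only once
// sp is non-zero, which is why the writer stores sp last.
package main

import "fmt"

type savedState struct {
	libcallg  uintptr // stand-in for the g that entered libc
	libcallpc uintptr // return address of the Go caller
	libcallsp uintptr // Go caller's stack pointer; written last
}

// sampleForProfile models what an async profiler does: if sp is non-zero,
// the whole record is valid, so traceback from the saved Go frame instead
// of whatever pc/sp the signal interrupted inside libc.
func sampleForProfile(s *savedState, trapPC, trapSP uintptr) (pc, sp uintptr) {
	if s.libcallsp != 0 {
		return s.libcallpc, s.libcallsp
	}
	return trapPC, trapSP
}

func main() {
	var s savedState
	fmt.Println(sampleForProfile(&s, 0xabc, 0xdef)) // nothing recorded: use trap pc/sp

	s.libcallg, s.libcallpc = 1, 0x100
	s.libcallsp = 0x200 // published last
	fmt.Println(sampleForProfile(&s, 0xabc, 0xdef)) // uses the saved record
}
```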
@@ -64,12 +66,14 @@ func sysvicall1(fn *libcFunc, a1 uintptr) uintptr {
 	if gp != nil {
 		mp = gp.m
 	}
-	if mp != nil {
+	if mp != nil && mp.libcallsp == 0 {
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
-	}
+	} else {
+		mp = nil
+	}

 	var libcall libcall
@@ -92,12 +96,14 @@ func sysvicall2(fn *libcFunc, a1, a2 uintptr) uintptr {
 	if gp != nil {
 		mp = gp.m
 	}
-	if mp != nil {
+	if mp != nil && mp.libcallsp == 0 {
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
-	}
+	} else {
+		mp = nil
+	}

 	var libcall libcall
@@ -119,12 +125,14 @@ func sysvicall3(fn *libcFunc, a1, a2, a3 uintptr) uintptr {
 	if gp != nil {
 		mp = gp.m
 	}
-	if mp != nil {
+	if mp != nil && mp.libcallsp == 0 {
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
-	}
+	} else {
+		mp = nil
+	}

 	var libcall libcall
@@ -146,12 +154,14 @@ func sysvicall4(fn *libcFunc, a1, a2, a3, a4 uintptr) uintptr {
 	if gp != nil {
 		mp = gp.m
 	}
-	if mp != nil {
+	if mp != nil && mp.libcallsp == 0 {
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
-	}
+	} else {
+		mp = nil
+	}

 	var libcall libcall
@@ -173,12 +183,14 @@ func sysvicall5(fn *libcFunc, a1, a2, a3, a4, a5 uintptr) uintptr {
 	if gp != nil {
 		mp = gp.m
 	}
-	if mp != nil {
+	if mp != nil && mp.libcallsp == 0 {
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
-	}
+	} else {
+		mp = nil
+	}

 	var libcall libcall
@@ -200,12 +212,14 @@ func sysvicall6(fn *libcFunc, a1, a2, a3, a4, a5, a6 uintptr) uintptr {
 	if gp != nil {
 		mp = gp.m
 	}
-	if mp != nil {
+	if mp != nil && mp.libcallsp == 0 {
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
-	}
+	} else {
+		mp = nil
+	}

 	var libcall libcall
@@ -734,17 +734,20 @@ func stdcall(fn stdFunction) uintptr {
 	gp := getg()
 	mp := gp.m
 	mp.libcall.fn = uintptr(unsafe.Pointer(fn))
-
-	if mp.profilehz != 0 {
+	resetLibcall := false
+	if mp.profilehz != 0 && mp.libcallsp == 0 {
 		// leave pc/sp for cpu profiler
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
+		resetLibcall = true // See comment in sys_darwin.go:libcCall
 	}
 	asmcgocall(asmstdcallAddr, unsafe.Pointer(&mp.libcall))
-	mp.libcallsp = 0
+	if resetLibcall {
+		mp.libcallsp = 0
+	}
 	return mp.libcall.r1
 }

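The Windows stdcall wrapper records the caller's pc/sp only when profiling is on, and before this change it cleared libcallsp unconditionally after asmcgocall. Under the outermost-call rule that unconditional clear would also wipe out a record made by an enclosing call, so the new resetLibcall flag limits the clearing to the call that actually recorded. A simplified before/after sketch (the profilehz condition is dropped and fakeM is invented for illustration):

```go
// Contrast of the old unconditional reset with the patched guarded reset.
// fakeM, oldCall and newCall are illustrative, not the runtime's code.
package main

import "fmt"

type fakeM struct{ libcallsp uintptr }

// oldCall mirrors the pre-patch shape: record, call, then always clear.
func oldCall(mp *fakeM, sp uintptr, body func()) {
	mp.libcallsp = sp
	body()
	mp.libcallsp = 0 // clears even a record made by an outer call
}

// newCall mirrors the patched shape: record only when nothing is recorded,
// and clear only if this call was the one that recorded.
func newCall(mp *fakeM, sp uintptr, body func()) {
	resetLibcall := false
	if mp.libcallsp == 0 {
		mp.libcallsp = sp
		resetLibcall = true
	}
	body()
	if resetLibcall {
		mp.libcallsp = 0
	}
}

func main() {
	m := &fakeM{}
	oldCall(m, 0x100, func() {
		oldCall(m, 0x200, func() {})
		fmt.Printf("old: outer record after nested call: %#x\n", m.libcallsp) // 0: lost
	})

	m = &fakeM{}
	newCall(m, 0x100, func() {
		newCall(m, 0x200, func() {})
		fmt.Printf("new: outer record after nested call: %#x\n", m.libcallsp) // 0x100: preserved
	})
}
```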
@@ -18,12 +18,30 @@ func libcCall(fn, arg unsafe.Pointer) int32 {
 	if gp != nil {
 		mp = gp.m
 	}
-	if mp != nil {
+	if mp != nil && mp.libcallsp == 0 {
 		mp.libcallg.set(gp)
 		mp.libcallpc = getcallerpc()
 		// sp must be the last, because once async cpu profiler finds
 		// all three values to be non-zero, it will use them
 		mp.libcallsp = getcallersp()
-	}
+	} else {
+		// Make sure we don't reset libcallsp. This makes
+		// libcCall reentrant; We remember the g/pc/sp for the
+		// first call on an M, until that libcCall instance
+		// returns. Reentrance only matters for signals, as
+		// libc never calls back into Go. The tricky case is
+		// where we call libcX from an M and record g/pc/sp.
+		// Before that call returns, a signal arrives on the
+		// same M and the signal handling code calls another
+		// libc function. We don't want that second libcCall
+		// from within the handler to be recorded, and we
+		// don't want that call's completion to zero
+		// libcallsp.
+		// We don't need to set libcall* while we're in a sighandler
+		// (even if we're not currently in libc) because we block all
+		// signals while we're handling a signal. That includes the
+		// profile signal, which is the one that uses the libcall* info.
+		mp = nil
+	}
 	res := asmcgocall(fn, arg)
 	if mp != nil {
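The Darwin libcCall hunk carries the comment that defines the reentrancy rule and shows the mp = nil trick the other wrappers reference: a nested call drops its local mp so that the post-asmcgocall cleanup, guarded by the if mp != nil visible at the end of the hunk, is skipped and the outer record survives until the outermost call returns. A sketch of that nil-sentinel shape (state and libcCallSketch are illustrative, not the runtime's code):

```go
// Sketch of the nil sentinel used by libcCall-style wrappers: a nested call
// drops its pointer to the record so the cleanup path after the C call is
// skipped entirely.
package main

import "fmt"

type state struct{ libcallsp uintptr }

func libcCallSketch(st *state, sp uintptr, body func()) {
	mp := st
	if mp.libcallsp == 0 {
		mp.libcallsp = sp // outermost call records its caller's sp
	} else {
		// Nested call, e.g. a libc call made from a signal handler:
		// keep the outer record and skip the cleanup below.
		mp = nil
	}
	body() // stands in for asmcgocall(fn, arg)
	if mp != nil {
		mp.libcallsp = 0 // cleanup runs only in the recording call
	}
}

func main() {
	st := &state{}
	libcCallSketch(st, 0x100, func() {
		libcCallSketch(st, 0x200, func() {
			fmt.Printf("nested call still sees the outer sp: %#x\n", st.libcallsp)
		})
		fmt.Printf("outer record survives the nested call: %#x\n", st.libcallsp)
	})
	fmt.Printf("cleared once the outermost call returns: %#x\n", st.libcallsp)
}
```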
@@ -679,7 +679,14 @@ func traceback(pc, sp, lr uintptr, gp *g) {
 // the initial PC must not be rewound to the previous instruction.
 // (All the saved pairs record a PC that is a return address, so we
 // rewind it into the CALL instruction.)
+// If gp.m.libcall{g,pc,sp} information is available, it uses that information in preference to
+// the pc/sp/lr passed in.
 func tracebacktrap(pc, sp, lr uintptr, gp *g) {
+	if gp.m.libcallsp != 0 {
+		// We're in C code somewhere, traceback from the saved position.
+		traceback1(gp.m.libcallpc, gp.m.libcallsp, 0, gp.m.libcallg.ptr(), 0)
+		return
+	}
 	traceback1(pc, sp, lr, gp, _TraceTrap)
 }

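The new tracebacktrap comment draws the distinction that decides which flags to pass: the saved libcallpc is a return address, so the traceback may rewind it into the CALL instruction, whereas a trap pc points at the faulting instruction and must not be rewound, which is why the saved-state path passes 0 rather than _TraceTrap. A small sketch of that distinction (traceTrap and startPC are illustrative names, not the runtime's):

```go
// Sketch of the pc-rewinding distinction behind tracebacktrap's two paths:
// a saved return address may be stepped back into its CALL instruction,
// while a trap pc already identifies the interesting instruction.
package main

import "fmt"

const traceTrap = 1 << 0 // stand-in for the runtime's _TraceTrap flag

// startPC picks the pc a traceback should attribute the innermost frame to.
func startPC(pc uintptr, flags int) uintptr {
	if flags&traceTrap == 0 && pc > 0 {
		// pc is a return address: step back so it lands inside the
		// CALL instruction that made the call.
		return pc - 1
	}
	return pc // trap pc: do not rewind
}

func main() {
	savedLibcEntryPC := uintptr(0x401234) // return address recorded at libc entry
	trapPC := uintptr(0x40abcd)           // pc of the faulting instruction

	fmt.Printf("start from libc entry (rewound):   %#x\n", startPC(savedLibcEntryPC, 0))
	fmt.Printf("start from trap (not rewound):     %#x\n", startPC(trapPC, traceTrap))
}
```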