[dev.typeparams] Revert "[dev.typeparams] runtime: remove unnecessary split-prevention from defer code"
This reverts CL 337651. That change causes `go test -count 1000 -run TestDeferHeapAndStack runtime` to fail with a SIGSEGV in freedefer (https://build.golang.org/log/c113b366cc6d51146db02a07b4d7dd931133efd5) and, sometimes, a GC bad pointer panic (https://build.golang.org/log/5b1cef7a9ad68704e9ef3ce3ad2fefca3ba86998).

Change-Id: Ie56c274b78603c81191213b302225ae19de27fb9
Reviewed-on: https://go-review.googlesource.com/c/go/+/338710
Trust: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
parent: e3e9f0bb2d
commit: 7bed50e667
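For context on the failure mode: the reproducer named in the commit message stresses defers while goroutine stacks grow and get copied. The sketch below is illustrative only and is not the runtime's TestDeferHeapAndStack; the name deepDefer, the padding size, and the recursion depth are invented. It keeps a defer record live in every frame while recursion forces repeated stack growth, which is the situation in which a stale or prematurely freed defer record would surface as a crash in freedefer.

// Illustrative sketch only, not the runtime's TestDeferHeapAndStack.
package main

import "fmt"

// deepDefer keeps a deferred call pending in every frame while the padded
// frames force the goroutine stack to grow and be copied repeatedly.
func deepDefer(n int, prev []byte) (count int) {
	if n == 0 {
		return 0
	}
	defer func() { count++ }() // defer record stays live across the recursive call
	var pad [512]byte          // padding so each frame is large enough to force stack growth
	pad[0] = byte(len(prev))
	return deepDefer(n-1, pad[:]) + 1
}

func main() {
	// Each level contributes 1 from its return value and 1 from its deferred
	// increment of the named result, so this prints 4000.
	fmt.Println(deepDefer(2000, nil))
}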
@@ -261,8 +261,10 @@ func deferproc(fn func()) {
 // deferprocStack queues a new deferred function with a defer record on the stack.
 // The defer record must have its fn field initialized.
 // All other fields can contain junk.
-// Nosplit because of the uninitialized pointer fields on the stack.
-//
+// The defer record must be immediately followed in memory by
+// the arguments of the defer.
+// Nosplit because the arguments on the stack won't be scanned
+// until the defer record is spliced into the gp._defer list.
 //go:nosplit
 func deferprocStack(d *_defer) {
 	gp := getg()
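The restored comment gives the reasoning for keeping deferprocStack nosplit: until the record is spliced into gp._defer, the stack slots holding it cannot be scanned safely, so no stack growth may happen inside the function. The //go:nosplit directive itself is ordinary Go tooling. Below is a minimal sketch, with a made-up function name, of what the directive does: the compiler omits the stack-split prologue, and the linker enforces that nosplit call chains fit in the fixed stack reserve (otherwise the build fails with "nosplit stack overflow").

// Minimal sketch of the //go:nosplit directive used above (illustrative;
// ordinary programs almost never need it).
package main

//go:nosplit
func addNoSplit(a, b int) int {
	// No stack-split prologue is emitted for this function, so calling it can
	// never trigger a stack grow/copy. That is the property the defer code
	// relies on while a defer record is still half-initialized.
	return a + b
}

func main() {
	println(addNoSplit(40, 2))
}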
@@ -311,6 +313,9 @@ func newdefer() *_defer {
 	gp := getg()
 	pp := gp.m.p.ptr()
 	if len(pp.deferpool) == 0 && sched.deferpool != nil {
+		// Take the slow path on the system stack so
+		// we don't grow newdefer's stack.
+		systemstack(func() {
 			lock(&sched.deferlock)
 			for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
 				d := sched.deferpool
@@ -319,6 +324,7 @@ func newdefer() *_defer {
 				pp.deferpool = append(pp.deferpool, d)
 			}
 			unlock(&sched.deferlock)
+		})
 	}
 	if n := len(pp.deferpool); n > 0 {
 		d = pp.deferpool[n-1]
@@ -335,6 +341,11 @@ func newdefer() *_defer {
 
 // Free the given defer.
 // The defer cannot be used after this call.
+//
+// This must not grow the stack because there may be a frame without a
+// stack map when this is called.
+//
+//go:nosplit
 func freedefer(d *_defer) {
 	if d._panic != nil {
 		freedeferpanic()
@@ -348,6 +359,10 @@ func freedefer(d *_defer) {
 	pp := getg().m.p.ptr()
 	if len(pp.deferpool) == cap(pp.deferpool) {
 		// Transfer half of local cache to the central cache.
+		//
+		// Take this slow path on the system stack so
+		// we don't grow freedefer's stack.
+		systemstack(func() {
 			var first, last *_defer
 			for len(pp.deferpool) > cap(pp.deferpool)/2 {
 				n := len(pp.deferpool)
@@ -365,6 +380,7 @@ func freedefer(d *_defer) {
 			last.link = sched.deferpool
 			sched.deferpool = first
 			unlock(&sched.deferlock)
+		})
 	}
 
 	// These lines used to be simply `*d = _defer{}` but that
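The newdefer and freedefer hunks above restore the systemstack wrapping around the pool-transfer loops. The structure being manipulated is a two-level free list: a small per-P cache that is refilled from, and drained into, a locked central list in half-capacity batches so that lock traffic stays rare. The sketch below is a plain user-level analogue under invented names (pool, node, local, central), with a mutex standing in for sched.deferlock; it leaves out per-P pinning and the system-stack concern and only shows the batching logic.

// User-level analogue of the two-level defer pool (sketch, not runtime code).
package main

import (
	"fmt"
	"sync"
)

type node struct{ next *node }

type pool struct {
	local []*node // analogous to pp.deferpool

	mu      sync.Mutex // analogous to sched.deferlock
	central *node      // analogous to sched.deferpool
}

func (p *pool) get() *node {
	if len(p.local) == 0 && p.central != nil {
		// Refill the local cache to half capacity from the central list.
		p.mu.Lock()
		for len(p.local) < cap(p.local)/2 && p.central != nil {
			n := p.central
			p.central = n.next
			n.next = nil
			p.local = append(p.local, n)
		}
		p.mu.Unlock()
	}
	if n := len(p.local); n > 0 {
		d := p.local[n-1]
		p.local[n-1] = nil
		p.local = p.local[:n-1]
		return d
	}
	return new(node) // both caches empty: allocate
}

func (p *pool) put(d *node) {
	if len(p.local) == cap(p.local) {
		// Local cache is full: move half of it to the central list.
		p.mu.Lock()
		for len(p.local) > cap(p.local)/2 {
			n := len(p.local)
			x := p.local[n-1]
			p.local[n-1] = nil
			p.local = p.local[:n-1]
			x.next = p.central
			p.central = x
		}
		p.mu.Unlock()
	}
	p.local = append(p.local, d)
}

func main() {
	p := &pool{local: make([]*node, 0, 8)}
	items := make([]*node, 0, 20)
	for i := 0; i < 20; i++ {
		items = append(items, p.get())
	}
	for _, it := range items {
		p.put(it)
	}
	fmt.Println("local cache:", len(p.local), "of", cap(p.local))
}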
@@ -404,6 +420,12 @@ func freedeferfn() {
 // to have been called by the caller of deferreturn at the point
 // just before deferreturn was called. The effect is that deferreturn
 // is called again and again until there are no more deferred functions.
+//
+// Declared as nosplit, because the function should not be preempted once we start
+// modifying the caller's frame in order to reuse the frame to call the deferred
+// function.
+//
+//go:nosplit
 func deferreturn() {
 	gp := getg()
 	d := gp._defer
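The restored comment describes the mechanism: deferreturn reuses the caller's frame and is effectively re-invoked until gp._defer has no more entries for that frame. The observable contract is simply that a function's deferred calls run in LIFO order when it returns. A minimal standard-Go example of that contract:

// Sketch of the user-visible behavior deferreturn implements.
package main

import "fmt"

func demo() {
	for i := 1; i <= 3; i++ {
		defer fmt.Println("deferred", i) // each record is spliced onto gp._defer
	}
	fmt.Println("body done")
}

func main() {
	demo()
	// Output:
	// body done
	// deferred 3
	// deferred 2
	// deferred 1
}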
@@ -424,6 +446,13 @@ func deferreturn() {
 		return
 	}
 
+	// Moving arguments around.
+	//
+	// Everything called after this point must be recursively
+	// nosplit because the garbage collector won't know the form
+	// of the arguments until the jmpdefer can flip the PC over to
+	// fn.
+	argp := getcallersp() + sys.MinFrameSize
 	fn := d.fn
 	d.fn = nil
 	gp._defer = d.link
@@ -433,9 +462,6 @@ func deferreturn() {
 	// called with a callback on an LR architecture and jmpdefer is on the
 	// stack, because jmpdefer manipulates SP (see issue #8153).
 	_ = **(**funcval)(unsafe.Pointer(&fn))
-	// We must not split the stack between computing argp and
-	// calling jmpdefer because argp is a uintptr stack pointer.
-	argp := getcallersp() + sys.MinFrameSize
 	jmpdefer(fn, argp)
 }
 
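The final hunk drops the late recomputation of argp because the revert computes it earlier (in the hunk at -424,6) and keeps deferreturn nosplit from that point on. The constraint behind the removed comment is that argp is a raw uintptr copy of a stack address, and uintptr values are not adjusted when the runtime copies a growing stack, so nothing that could split the stack may run between computing argp and handing it to jmpdefer. The sketch below shows the same rule as it surfaces in ordinary Go's unsafe.Pointer conventions; the pair type and field names are invented.

// Sketch of why a uintptr copy of a stack address must not outlive a
// potential stack copy (illustrative, not runtime code).
package main

import (
	"fmt"
	"unsafe"
)

type pair struct {
	a, b int32
}

func main() {
	p := pair{a: 1, b: 2}

	// Valid: the uintptr exists only inside a single expression, so there is
	// no window in which a raw stack address could outlive a stack copy.
	bp := (*int32)(unsafe.Pointer(uintptr(unsafe.Pointer(&p)) + unsafe.Offsetof(p.b)))
	fmt.Println(*bp) // prints 2

	// Risky in general: storing uintptr(unsafe.Pointer(&p)) in a variable and
	// dereferencing it after calls that may grow the stack. deferreturn gets
	// away with the equivalent only because it is //go:nosplit from the point
	// argp is computed until jmpdefer consumes it.
}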