1
0
mirror of https://github.com/golang/go synced 2024-11-26 07:38:00 -07:00

[dev.typeparams] Revert "[dev.typeparams] runtime: remove unnecessary split-prevention from defer code"

This reverts CL 337651.

This causes `go test -count 1000 -run TestDeferHeapAndStack runtime`
to fail with a SIGSEGV in freedefer
[https://build.golang.org/log/c113b366cc6d51146db02a07b4d7dd931133efd5]
and possibly sometimes a GC bad pointer panic
[https://build.golang.org/log/5b1cef7a9ad68704e9ef3ce3ad2fefca3ba86998].

Change-Id: Ie56c274b78603c81191213b302225ae19de27fb9
Reviewed-on: https://go-review.googlesource.com/c/go/+/338710
Trust: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
This commit is contained in:
Austin Clements 2021-07-30 16:41:11 -04:00
parent e3e9f0bb2d
commit 7bed50e667

View File

@ -261,8 +261,10 @@ func deferproc(fn func()) {
// deferprocStack queues a new deferred function with a defer record on the stack. // deferprocStack queues a new deferred function with a defer record on the stack.
// The defer record must have its fn field initialized. // The defer record must have its fn field initialized.
// All other fields can contain junk. // All other fields can contain junk.
// Nosplit because of the uninitialized pointer fields on the stack. // The defer record must be immediately followed in memory by
// // the arguments of the defer.
// Nosplit because the arguments on the stack won't be scanned
// until the defer record is spliced into the gp._defer list.
//go:nosplit //go:nosplit
func deferprocStack(d *_defer) { func deferprocStack(d *_defer) {
gp := getg() gp := getg()
@ -311,14 +313,18 @@ func newdefer() *_defer {
gp := getg() gp := getg()
pp := gp.m.p.ptr() pp := gp.m.p.ptr()
if len(pp.deferpool) == 0 && sched.deferpool != nil { if len(pp.deferpool) == 0 && sched.deferpool != nil {
lock(&sched.deferlock) // Take the slow path on the system stack so
for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil { // we don't grow newdefer's stack.
d := sched.deferpool systemstack(func() {
sched.deferpool = d.link lock(&sched.deferlock)
d.link = nil for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
pp.deferpool = append(pp.deferpool, d) d := sched.deferpool
} sched.deferpool = d.link
unlock(&sched.deferlock) d.link = nil
pp.deferpool = append(pp.deferpool, d)
}
unlock(&sched.deferlock)
})
} }
if n := len(pp.deferpool); n > 0 { if n := len(pp.deferpool); n > 0 {
d = pp.deferpool[n-1] d = pp.deferpool[n-1]
@ -335,6 +341,11 @@ func newdefer() *_defer {
// Free the given defer. // Free the given defer.
// The defer cannot be used after this call. // The defer cannot be used after this call.
//
// This must not grow the stack because there may be a frame without a
// stack map when this is called.
//
//go:nosplit
func freedefer(d *_defer) { func freedefer(d *_defer) {
if d._panic != nil { if d._panic != nil {
freedeferpanic() freedeferpanic()
@ -348,23 +359,28 @@ func freedefer(d *_defer) {
pp := getg().m.p.ptr() pp := getg().m.p.ptr()
if len(pp.deferpool) == cap(pp.deferpool) { if len(pp.deferpool) == cap(pp.deferpool) {
// Transfer half of local cache to the central cache. // Transfer half of local cache to the central cache.
var first, last *_defer //
for len(pp.deferpool) > cap(pp.deferpool)/2 { // Take this slow path on the system stack so
n := len(pp.deferpool) // we don't grow freedefer's stack.
d := pp.deferpool[n-1] systemstack(func() {
pp.deferpool[n-1] = nil var first, last *_defer
pp.deferpool = pp.deferpool[:n-1] for len(pp.deferpool) > cap(pp.deferpool)/2 {
if first == nil { n := len(pp.deferpool)
first = d d := pp.deferpool[n-1]
} else { pp.deferpool[n-1] = nil
last.link = d pp.deferpool = pp.deferpool[:n-1]
if first == nil {
first = d
} else {
last.link = d
}
last = d
} }
last = d lock(&sched.deferlock)
} last.link = sched.deferpool
lock(&sched.deferlock) sched.deferpool = first
last.link = sched.deferpool unlock(&sched.deferlock)
sched.deferpool = first })
unlock(&sched.deferlock)
} }
// These lines used to be simply `*d = _defer{}` but that // These lines used to be simply `*d = _defer{}` but that
@ -404,6 +420,12 @@ func freedeferfn() {
// to have been called by the caller of deferreturn at the point // to have been called by the caller of deferreturn at the point
// just before deferreturn was called. The effect is that deferreturn // just before deferreturn was called. The effect is that deferreturn
// is called again and again until there are no more deferred functions. // is called again and again until there are no more deferred functions.
//
// Declared as nosplit, because the function should not be preempted once we start
// modifying the caller's frame in order to reuse the frame to call the deferred
// function.
//
//go:nosplit
func deferreturn() { func deferreturn() {
gp := getg() gp := getg()
d := gp._defer d := gp._defer
@ -424,6 +446,13 @@ func deferreturn() {
return return
} }
// Moving arguments around.
//
// Everything called after this point must be recursively
// nosplit because the garbage collector won't know the form
// of the arguments until the jmpdefer can flip the PC over to
// fn.
argp := getcallersp() + sys.MinFrameSize
fn := d.fn fn := d.fn
d.fn = nil d.fn = nil
gp._defer = d.link gp._defer = d.link
@ -433,9 +462,6 @@ func deferreturn() {
// called with a callback on an LR architecture and jmpdefer is on the // called with a callback on an LR architecture and jmpdefer is on the
// stack, because jmpdefer manipulates SP (see issue #8153). // stack, because jmpdefer manipulates SP (see issue #8153).
_ = **(**funcval)(unsafe.Pointer(&fn)) _ = **(**funcval)(unsafe.Pointer(&fn))
// We must not split the stack between computing argp and
// calling jmpdefer because argp is a uintptr stack pointer.
argp := getcallersp() + sys.MinFrameSize
jmpdefer(fn, argp) jmpdefer(fn, argp)
} }