
[dev.typeparams] runtime: remove unnecessary split-prevention from defer code

Prior to regabi, the compiler passed defer arguments to the runtime as
untyped values on the stack. This meant a lot of defer-related runtime
functions had to be very careful not to grow the stack or allow
preemption since the stack could not be safely scanned or moved.
However, with regabi, every defer is now simply a func() from the
runtime's perspective, which means we no longer have untyped values on
the stack when we enter defer-related runtime code.
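
To illustrate (a hedged sketch, not part of the CL and only an
approximation of how the compiler lowers it): under regabi, a defer
that takes arguments reaches the runtime as a plain func(), roughly
as if the call had been wrapped in a closure over its
already-evaluated arguments:

	package main

	import "fmt"

	func report(code int, msg string) {
		fmt.Println("deferred:", code, msg)
	}

	func main() {
		// What the user writes: report's arguments are evaluated here,
		// and the call itself runs when main returns.
		defer report(1, "as written")

		// Roughly how the runtime sees it under regabi: the evaluated
		// arguments are captured by a closure, so the defer record only
		// needs to hold a func(), not raw argument bytes on the stack.
		code, msg := 2, "as a closure"
		defer func() { report(code, msg) }()

		fmt.Println("function body runs first")
	}

Either form defers a call whose arguments were evaluated at the defer
statement; the difference is only in what the runtime has to store, a
func() value instead of untyped argument bytes.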

Hence, this CL removes a lot of the now-unnecessary carefulness in the
defer implementation. Specifically, deferreturn no longer needs to be
nosplit because it doesn't copy untyped defer arguments to its
caller's frame (we also update some stale comments in deferreturn).
freedefer no longer needs to be nosplit because none of its callers
are deeply nosplit. And newdefer and freedefer no longer need to
switch to the system stack on their slow paths to avoid stack
growth.
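
For readers unfamiliar with those slow paths: newdefer and freedefer
keep a per-P cache of defer records that is refilled from, and
spilled into, a lock-protected central pool, and with no untyped
arguments on the stack that bookkeeping can now run on the ordinary
goroutine stack. The following stand-alone sketch shows the same
two-level caching pattern with hypothetical names (node, local,
central) rather than the runtime's real types:

	package main

	import (
		"fmt"
		"sync"
	)

	// node stands in for the runtime's _defer record in this sketch.
	type node struct {
		link *node
	}

	// central mimics sched.deferpool: a linked list guarded by a lock.
	var central struct {
		mu   sync.Mutex
		head *node
	}

	// local mimics a per-P cache (pp.deferpool): a slice used without locking.
	var local = make([]*node, 0, 32)

	// get mirrors newdefer's slow path: when the local cache is empty,
	// refill half of it from the central list under the lock, then pop one.
	func get() *node {
		if len(local) == 0 && central.head != nil {
			central.mu.Lock()
			for len(local) < cap(local)/2 && central.head != nil {
				d := central.head
				central.head = d.link
				d.link = nil
				local = append(local, d)
			}
			central.mu.Unlock()
		}
		if n := len(local); n > 0 {
			d := local[n-1]
			local[n-1] = nil
			local = local[:n-1]
			return d
		}
		return new(node)
	}

	// put mirrors freedefer's slow path: when the local cache is full, move
	// half of it onto the central list as one linked chain, then stash d.
	func put(d *node) {
		if len(local) == cap(local) {
			var first, last *node
			for len(local) > cap(local)/2 {
				n := len(local)
				x := local[n-1]
				local[n-1] = nil
				local = local[:n-1]
				if first == nil {
					first = x
				} else {
					last.link = x
				}
				last = x
			}
			central.mu.Lock()
			last.link = central.head
			central.head = first
			central.mu.Unlock()
		}
		local = append(local, d)
	}

	func main() {
		for i := 0; i < 100; i++ {
			put(get())
		}
		fmt.Println("records cached locally:", len(local))
	}

Transferring half of the cache at a time keeps lock traffic low while
leaving both caches populated; the runtime uses the same cap/2
proportion, as the diff below shows.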

deferprocStack is the only function that still needs to be nosplit,
but that's because the compiler calls it with uninitialized live
pointer slots on the stack (maybe we should change that, but that's a
very different fix).
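
For context: //go:nosplit tells the compiler to omit the stack-growth
check from a function's prologue, so the function must fit in the
fixed nosplit stack budget and must not do anything that could move
the stack. A minimal illustration with a made-up function (the
directive is real, the function is not from the runtime):

	package main

	// add1 is a made-up example, not runtime code. The //go:nosplit
	// directive below suppresses the stack-growth check in the prologue,
	// so the function can run at points where growing the stack would be
	// unsafe, as long as it fits in the fixed nosplit stack budget.
	//
	//go:nosplit
	func add1(x int) int {
		return x + 1
	}

	func main() {
		println(add1(41)) // prints 42
	}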

Change-Id: I1156ec90bff2613fe4b48b84b375943349ce637d
Reviewed-on: https://go-review.googlesource.com/c/go/+/337651
Trust: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Cherry Mui <cherryyz@google.com>
Austin Clements 2021-07-26 12:04:36 -04:00
parent ea94e5d3c5
commit 53fd5b1b77


@@ -261,10 +261,8 @@ func deferproc(fn func()) {
 // deferprocStack queues a new deferred function with a defer record on the stack.
 // The defer record must have its fn field initialized.
 // All other fields can contain junk.
-// The defer record must be immediately followed in memory by
-// the arguments of the defer.
-// Nosplit because the arguments on the stack won't be scanned
-// until the defer record is spliced into the gp._defer list.
+// Nosplit because of the uninitialized pointer fields on the stack.
+//
 //go:nosplit
 func deferprocStack(d *_defer) {
 	gp := getg()
@@ -313,18 +311,14 @@ func newdefer() *_defer {
 	gp := getg()
 	pp := gp.m.p.ptr()
 	if len(pp.deferpool) == 0 && sched.deferpool != nil {
-		// Take the slow path on the system stack so
-		// we don't grow newdefer's stack.
-		systemstack(func() {
-			lock(&sched.deferlock)
-			for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
-				d := sched.deferpool
-				sched.deferpool = d.link
-				d.link = nil
-				pp.deferpool = append(pp.deferpool, d)
-			}
-			unlock(&sched.deferlock)
-		})
+		lock(&sched.deferlock)
+		for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
+			d := sched.deferpool
+			sched.deferpool = d.link
+			d.link = nil
+			pp.deferpool = append(pp.deferpool, d)
+		}
+		unlock(&sched.deferlock)
 	}
 	if n := len(pp.deferpool); n > 0 {
 		d = pp.deferpool[n-1]
@@ -341,11 +335,6 @@ func newdefer() *_defer {
 // Free the given defer.
 // The defer cannot be used after this call.
-//
-// This must not grow the stack because there may be a frame without a
-// stack map when this is called.
-//
-//go:nosplit
 func freedefer(d *_defer) {
 	if d._panic != nil {
 		freedeferpanic()
@@ -359,28 +348,23 @@ func freedefer(d *_defer) {
 	pp := getg().m.p.ptr()
 	if len(pp.deferpool) == cap(pp.deferpool) {
 		// Transfer half of local cache to the central cache.
-		//
-		// Take this slow path on the system stack so
-		// we don't grow freedefer's stack.
-		systemstack(func() {
-			var first, last *_defer
-			for len(pp.deferpool) > cap(pp.deferpool)/2 {
-				n := len(pp.deferpool)
-				d := pp.deferpool[n-1]
-				pp.deferpool[n-1] = nil
-				pp.deferpool = pp.deferpool[:n-1]
-				if first == nil {
-					first = d
-				} else {
-					last.link = d
-				}
-				last = d
-			}
-			lock(&sched.deferlock)
-			last.link = sched.deferpool
-			sched.deferpool = first
-			unlock(&sched.deferlock)
-		})
+		var first, last *_defer
+		for len(pp.deferpool) > cap(pp.deferpool)/2 {
+			n := len(pp.deferpool)
+			d := pp.deferpool[n-1]
+			pp.deferpool[n-1] = nil
+			pp.deferpool = pp.deferpool[:n-1]
+			if first == nil {
+				first = d
+			} else {
+				last.link = d
+			}
+			last = d
+		}
+		lock(&sched.deferlock)
+		last.link = sched.deferpool
+		sched.deferpool = first
+		unlock(&sched.deferlock)
 	}
 	// These lines used to be simply `*d = _defer{}` but that
@@ -420,12 +404,6 @@ func freedeferfn() {
 // to have been called by the caller of deferreturn at the point
 // just before deferreturn was called. The effect is that deferreturn
 // is called again and again until there are no more deferred functions.
-//
-// Declared as nosplit, because the function should not be preempted once we start
-// modifying the caller's frame in order to reuse the frame to call the deferred
-// function.
-//
-//go:nosplit
 func deferreturn() {
 	gp := getg()
 	d := gp._defer
@@ -446,13 +424,6 @@ func deferreturn() {
 		return
 	}
-	// Moving arguments around.
-	//
-	// Everything called after this point must be recursively
-	// nosplit because the garbage collector won't know the form
-	// of the arguments until the jmpdefer can flip the PC over to
-	// fn.
-	argp := getcallersp() + sys.MinFrameSize
 	fn := d.fn
 	d.fn = nil
 	gp._defer = d.link
@@ -462,6 +433,9 @@ func deferreturn() {
 	// called with a callback on an LR architecture and jmpdefer is on the
 	// stack, because jmpdefer manipulates SP (see issue #8153).
 	_ = **(**funcval)(unsafe.Pointer(&fn))
+	// We must not split the stack between computing argp and
+	// calling jmpdefer because argp is a uintptr stack pointer.
+	argp := getcallersp() + sys.MinFrameSize
 	jmpdefer(fn, argp)
 }