mirror of
https://github.com/golang/go
synced 2024-11-17 13:54:46 -07:00
[dev.typeparams] runtime: remove unnecessary split-prevention from defer code
Prior to regabi, the compiler passed defer arguments to the runtime as untyped values on the stack. This meant a lot of defer-related runtime functions had to be very careful not to grow the stack or allow preemption since the stack could not be safely scanned or moved. However, with regabi, every defer is now simply a func() from the runtime's perspective, which means we no longer have untyped values on the stack when we enter defer-related runtime code. Hence, this CL removes a lot of the now-unnecessary carefulness in the defer implementation. Specifically, deferreturn no longer needs to be nosplit because it doesn't copy untyped defer arguments to its caller's frame (we also update some stale comments in deferreturn). freedefer no longer needs to be nosplit because none of its callers are deeply nosplit. And newdefer and freedefer no longer need to switch to the systemstack on their slow paths to avoid stack growth. deferprocStack is the only function that still needs to be nosplit, but that's because the compiler calls it with uninitialized live pointer slots on the stack (maybe we should change that, but that's a very different fix). Change-Id: I1156ec90bff2613fe4b48b84b375943349ce637d Reviewed-on: https://go-review.googlesource.com/c/go/+/337651 Trust: Austin Clements <austin@google.com> Run-TryBot: Austin Clements <austin@google.com> TryBot-Result: Go Bot <gobot@golang.org> Reviewed-by: Cherry Mui <cherryyz@google.com>
This commit is contained in:
parent
ea94e5d3c5
commit
53fd5b1b77
@ -261,10 +261,8 @@ func deferproc(fn func()) {
|
|||||||
// deferprocStack queues a new deferred function with a defer record on the stack.
|
// deferprocStack queues a new deferred function with a defer record on the stack.
|
||||||
// The defer record must have its fn field initialized.
|
// The defer record must have its fn field initialized.
|
||||||
// All other fields can contain junk.
|
// All other fields can contain junk.
|
||||||
// The defer record must be immediately followed in memory by
|
// Nosplit because of the uninitialized pointer fields on the stack.
|
||||||
// the arguments of the defer.
|
//
|
||||||
// Nosplit because the arguments on the stack won't be scanned
|
|
||||||
// until the defer record is spliced into the gp._defer list.
|
|
||||||
//go:nosplit
|
//go:nosplit
|
||||||
func deferprocStack(d *_defer) {
|
func deferprocStack(d *_defer) {
|
||||||
gp := getg()
|
gp := getg()
|
||||||
@ -313,9 +311,6 @@ func newdefer() *_defer {
|
|||||||
gp := getg()
|
gp := getg()
|
||||||
pp := gp.m.p.ptr()
|
pp := gp.m.p.ptr()
|
||||||
if len(pp.deferpool) == 0 && sched.deferpool != nil {
|
if len(pp.deferpool) == 0 && sched.deferpool != nil {
|
||||||
// Take the slow path on the system stack so
|
|
||||||
// we don't grow newdefer's stack.
|
|
||||||
systemstack(func() {
|
|
||||||
lock(&sched.deferlock)
|
lock(&sched.deferlock)
|
||||||
for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
|
for len(pp.deferpool) < cap(pp.deferpool)/2 && sched.deferpool != nil {
|
||||||
d := sched.deferpool
|
d := sched.deferpool
|
||||||
@ -324,7 +319,6 @@ func newdefer() *_defer {
|
|||||||
pp.deferpool = append(pp.deferpool, d)
|
pp.deferpool = append(pp.deferpool, d)
|
||||||
}
|
}
|
||||||
unlock(&sched.deferlock)
|
unlock(&sched.deferlock)
|
||||||
})
|
|
||||||
}
|
}
|
||||||
if n := len(pp.deferpool); n > 0 {
|
if n := len(pp.deferpool); n > 0 {
|
||||||
d = pp.deferpool[n-1]
|
d = pp.deferpool[n-1]
|
||||||
@ -341,11 +335,6 @@ func newdefer() *_defer {
|
|||||||
|
|
||||||
// Free the given defer.
|
// Free the given defer.
|
||||||
// The defer cannot be used after this call.
|
// The defer cannot be used after this call.
|
||||||
//
|
|
||||||
// This must not grow the stack because there may be a frame without a
|
|
||||||
// stack map when this is called.
|
|
||||||
//
|
|
||||||
//go:nosplit
|
|
||||||
func freedefer(d *_defer) {
|
func freedefer(d *_defer) {
|
||||||
if d._panic != nil {
|
if d._panic != nil {
|
||||||
freedeferpanic()
|
freedeferpanic()
|
||||||
@ -359,10 +348,6 @@ func freedefer(d *_defer) {
|
|||||||
pp := getg().m.p.ptr()
|
pp := getg().m.p.ptr()
|
||||||
if len(pp.deferpool) == cap(pp.deferpool) {
|
if len(pp.deferpool) == cap(pp.deferpool) {
|
||||||
// Transfer half of local cache to the central cache.
|
// Transfer half of local cache to the central cache.
|
||||||
//
|
|
||||||
// Take this slow path on the system stack so
|
|
||||||
// we don't grow freedefer's stack.
|
|
||||||
systemstack(func() {
|
|
||||||
var first, last *_defer
|
var first, last *_defer
|
||||||
for len(pp.deferpool) > cap(pp.deferpool)/2 {
|
for len(pp.deferpool) > cap(pp.deferpool)/2 {
|
||||||
n := len(pp.deferpool)
|
n := len(pp.deferpool)
|
||||||
@ -380,7 +365,6 @@ func freedefer(d *_defer) {
|
|||||||
last.link = sched.deferpool
|
last.link = sched.deferpool
|
||||||
sched.deferpool = first
|
sched.deferpool = first
|
||||||
unlock(&sched.deferlock)
|
unlock(&sched.deferlock)
|
||||||
})
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// These lines used to be simply `*d = _defer{}` but that
|
// These lines used to be simply `*d = _defer{}` but that
|
||||||
@ -420,12 +404,6 @@ func freedeferfn() {
|
|||||||
// to have been called by the caller of deferreturn at the point
|
// to have been called by the caller of deferreturn at the point
|
||||||
// just before deferreturn was called. The effect is that deferreturn
|
// just before deferreturn was called. The effect is that deferreturn
|
||||||
// is called again and again until there are no more deferred functions.
|
// is called again and again until there are no more deferred functions.
|
||||||
//
|
|
||||||
// Declared as nosplit, because the function should not be preempted once we start
|
|
||||||
// modifying the caller's frame in order to reuse the frame to call the deferred
|
|
||||||
// function.
|
|
||||||
//
|
|
||||||
//go:nosplit
|
|
||||||
func deferreturn() {
|
func deferreturn() {
|
||||||
gp := getg()
|
gp := getg()
|
||||||
d := gp._defer
|
d := gp._defer
|
||||||
@ -446,13 +424,6 @@ func deferreturn() {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Moving arguments around.
|
|
||||||
//
|
|
||||||
// Everything called after this point must be recursively
|
|
||||||
// nosplit because the garbage collector won't know the form
|
|
||||||
// of the arguments until the jmpdefer can flip the PC over to
|
|
||||||
// fn.
|
|
||||||
argp := getcallersp() + sys.MinFrameSize
|
|
||||||
fn := d.fn
|
fn := d.fn
|
||||||
d.fn = nil
|
d.fn = nil
|
||||||
gp._defer = d.link
|
gp._defer = d.link
|
||||||
@ -462,6 +433,9 @@ func deferreturn() {
|
|||||||
// called with a callback on an LR architecture and jmpdefer is on the
|
// called with a callback on an LR architecture and jmpdefer is on the
|
||||||
// stack, because jmpdefer manipulates SP (see issue #8153).
|
// stack, because jmpdefer manipulates SP (see issue #8153).
|
||||||
_ = **(**funcval)(unsafe.Pointer(&fn))
|
_ = **(**funcval)(unsafe.Pointer(&fn))
|
||||||
|
// We must not split the stack between computing argp and
|
||||||
|
// calling jmpdefer because argp is a uintptr stack pointer.
|
||||||
|
argp := getcallersp() + sys.MinFrameSize
|
||||||
jmpdefer(fn, argp)
|
jmpdefer(fn, argp)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
Loading…
Reference in New Issue
Block a user