mirror of https://github.com/golang/go, synced 2024-11-17 20:14:45 -07:00
runtime: always keep global reference to mp until mexit completes
Ms are allocated via standard heap allocation (`new(m)`), which means we must keep them alive (i.e., reachable by the GC) until we are completely done using them.

Ms are primarily reachable through runtime.allm. However, runtime.mexit drops the M from allm fairly early, long before it is done using the M structure. If that was the last reference to the M, it is now at risk of being freed by the GC and used for some other allocation, leading to memory corruption.

Ms with a Go-allocated stack coincidentally already keep a reference to the M in sched.freem, so that the stack can be freed lazily. This reference has the side effect of keeping these Ms reachable. However, Ms with an OS stack skip this and are at risk of corruption.

Fix this lifetime by extending the use of sched.freem to all Ms, with the value of mp.freeWait determining whether the stack needs to be freed or not.

Fixes #56243.

Change-Id: Ic0c01684775f5646970df507111c9abaac0ba52e
Reviewed-on: https://go-review.googlesource.com/c/go/+/443716
TryBot-Result: Gopher Robot <gobot@golang.org>
Run-TryBot: Michael Pratt <mpratt@google.com>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
parent: a8e4b8c2a7
commit: e252dcf9d3
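Before the diff itself, a minimal, self-contained Go sketch of the lifetime pattern the commit message describes may help: a retired object is pushed onto a global free list before its other references are dropped, so the GC keeps it alive until a reaper sees that it is really done. The names here (worker, retire, reap, freeList) are illustrative stand-ins for m, mexit, allocm, and sched.freem; this is not the runtime's code.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// worker stands in for the runtime's heap-allocated M (allocated with new(m)).
// While a thread is still using it, something the GC can see must point at it,
// or the memory may be reclaimed and reused while it is still in use.
type worker struct {
	id       int
	done     atomic.Bool // set only after the very last use of the worker
	freelink *worker     // next entry on the global free list
}

// freeMu/freeList play the role of sched.lock/sched.freem: a global reference
// that keeps retired workers reachable until a reaper confirms they are done.
var (
	freeMu   sync.Mutex
	freeList *worker
)

// retire publishes w on the global free list *before* any other reference to
// it is dropped, so it stays reachable for as long as it is still in use.
func retire(w *worker) {
	freeMu.Lock()
	w.freelink = freeList
	freeList = w
	freeMu.Unlock()
}

// reap drops only the workers that have signaled completion, keeping the rest
// linked (and therefore alive), mirroring how allocm reaps sched.freem.
func reap() {
	freeMu.Lock()
	defer freeMu.Unlock()
	var keep *worker
	for w := freeList; w != nil; {
		next := w.freelink
		if w.done.Load() {
			fmt.Printf("worker %d reaped\n", w.id)
		} else {
			w.freelink = keep
			keep = w
		}
		w = next
	}
	freeList = keep
}

func main() {
	w := &worker{id: 1}
	retire(w)          // keep a global reference first
	reap()             // nothing reaped: w.done is still false
	w.done.Store(true) // last use of w
	reap()             // now the reference can be dropped safely
}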
@@ -7,6 +7,7 @@ package runtime
 
 import (
 	"internal/abi"
 	"internal/goarch"
+	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -182,7 +183,7 @@ func newosproc(mp *m) {
 	}
 }
 
-func exitThread(wait *uint32) {
+func exitThread(wait *atomic.Uint32) {
 	// We should never reach exitThread on Solaris because we let
 	// libc clean up threads.
 	throw("exitThread")
@@ -8,6 +8,7 @@ package runtime
 
 import (
 	"internal/abi"
+	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -233,7 +234,7 @@ func newosproc(mp *m) {
 
 }
 
-func exitThread(wait *uint32) {
+func exitThread(wait *atomic.Uint32) {
 	// We should never reach exitThread on AIX because we let
 	// libc clean up threads.
 	throw("exitThread")
@@ -7,6 +7,7 @@
 package runtime
 
 import (
+	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -35,7 +36,7 @@ func usleep_no_g(usec uint32) {
 	usleep(usec)
 }
 
-func exitThread(wait *uint32)
+func exitThread(wait *atomic.Uint32)
 
 type mOS struct{}
@@ -7,6 +7,7 @@
 package runtime
 
 import (
+	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -49,11 +50,11 @@ func open(name *byte, mode, perm int32) int32
 // return value is only set on linux to be used in osinit()
 func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
 
-// exitThread terminates the current thread, writing *wait = 0 when
+// exitThread terminates the current thread, writing *wait = freeMStack when
 // the stack is safe to reclaim.
 //
 //go:noescape
-func exitThread(wait *uint32)
+func exitThread(wait *atomic.Uint32)
 
 //go:noescape
 func obsdsigprocmask(how int32, new sigset) sigset
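The comment changed above ("writing *wait = freeMStack when the stack is safe to reclaim") describes a one-way handshake: the exiting thread's very last action is an atomic store into *wait, and only after observing that store may anyone reclaim the stack. A toy sketch of that handshake, with a byte slice standing in for the g0 stack (illustrative scenario, not the runtime's implementation):

package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

// Values follow the diff; the scenario below is illustrative only.
const (
	freeMStack = 0 // stored by the exiting side once the stack is reclaimable
	freeMWait  = 2 // still in use
)

func main() {
	wait := new(atomic.Uint32)
	wait.Store(freeMWait)

	buf := make([]byte, 64) // stands in for the g0 stack

	go func() {
		copy(buf, "goodbye") // last use of buf by the exiting side
		// Final action: signal that buf is no longer in use.
		wait.Store(freeMStack)
	}()

	// Reclaimer: spin until the exiting side has signalled completion.
	for wait.Load() != freeMStack {
		runtime.Gosched()
	}
	fmt.Println("safe to reclaim:", string(buf[:7]))
}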
@@ -468,7 +468,7 @@ func newosproc(mp *m) {
 	}
 }
 
-func exitThread(wait *uint32) {
+func exitThread(wait *atomic.Uint32) {
 	// We should never reach exitThread on Plan 9 because we let
 	// the OS clean up threads.
 	throw("exitThread")
@@ -941,7 +941,7 @@ func newosproc0(mp *m, stk unsafe.Pointer) {
 	throw("bad newosproc0")
 }
 
-func exitThread(wait *uint32) {
+func exitThread(wait *atomic.Uint32) {
 	// We should never reach exitThread on Windows because we let
 	// the OS clean up threads.
 	throw("exitThread")
@@ -1582,19 +1582,18 @@ func mexit(osStack bool) {
 	}
 	throw("m not found in allm")
 found:
-	if !osStack {
-		// Delay reaping m until it's done with the stack.
-		//
-		// If this is using an OS stack, the OS will free it
-		// so there's no need for reaping.
-		atomic.Store(&mp.freeWait, 1)
-		// Put m on the free list, though it will not be reaped until
-		// freeWait is 0. Note that the free list must not be linked
-		// through alllink because some functions walk allm without
-		// locking, so may be using alllink.
-		mp.freelink = sched.freem
-		sched.freem = mp
-	}
+	// Delay reaping m until it's done with the stack.
+	//
+	// Put mp on the free list, though it will not be reaped while freeWait
+	// is freeMWait. mp is no longer reachable via allm, so even if it is
+	// on an OS stack, we must keep a reference to mp alive so that the GC
+	// doesn't free mp while we are still using it.
+	//
+	// Note that the free list must not be linked through alllink because
+	// some functions walk allm without locking, so may be using alllink.
+	mp.freeWait.Store(freeMWait)
+	mp.freelink = sched.freem
+	sched.freem = mp
 	unlock(&sched.lock)
 
 	atomic.Xadd64(&ncgocall, int64(mp.ncgocall))
@@ -1624,6 +1623,9 @@ found:
 	mdestroy(mp)
 
 	if osStack {
+		// No more uses of mp, so it is safe to drop the reference.
+		mp.freeWait.Store(freeMRef)
+
 		// Return from mstart and let the system thread
 		// library free the g0 stack and terminate the thread.
 		return
@@ -1795,19 +1797,25 @@ func allocm(pp *p, fn func(), id int64) *m {
 	lock(&sched.lock)
 	var newList *m
 	for freem := sched.freem; freem != nil; {
-		if freem.freeWait != 0 {
+		wait := freem.freeWait.Load()
+		if wait == freeMWait {
 			next := freem.freelink
 			freem.freelink = newList
 			newList = freem
 			freem = next
 			continue
 		}
-		// stackfree must be on the system stack, but allocm is
-		// reachable off the system stack transitively from
-		// startm.
-		systemstack(func() {
-			stackfree(freem.g0.stack)
-		})
+		// Free the stack if needed. For freeMRef, there is
+		// nothing to do except drop freem from the sched.freem
+		// list.
+		if wait == freeMStack {
+			// stackfree must be on the system stack, but allocm is
+			// reachable off the system stack transitively from
+			// startm.
+			systemstack(func() {
+				stackfree(freem.g0.stack)
+			})
+		}
 		freem = freem.freelink
 	}
 	sched.freem = newList
@@ -516,6 +516,13 @@ const (
 	tlsSize = tlsSlots * goarch.PtrSize
 )
 
+// Values for m.freeWait.
+const (
+	freeMStack = 0 // M done, free stack and reference.
+	freeMRef   = 1 // M done, free reference.
+	freeMWait  = 2 // M still in use.
+)
+
 type m struct {
 	g0      *g    // goroutine with scheduling stack
 	morebuf gobuf // gobuf arg to morestack
@@ -547,7 +554,7 @@ type m struct {
 	printlock  int8
 	incgo      bool // m is executing a cgo call
 	isextra    bool // m is an extra m
-	freeWait   uint32 // if == 0, safe to free g0 and delete m (atomic)
+	freeWait   atomic.Uint32 // Whether it is safe to free g0 and delete m (one of freeMRef, freeMStack, freeMWait)
 	fastrand   uint64
 	needextram bool
 	traceback  uint8
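The three constants above give mp.freeWait a small lifecycle: mexit stores freeMWait while the M is still in use; the OS-stack path stores freeMRef once mexit has no further uses of mp; on the Go-stack path, exitThread stores freeMStack once the thread is off its g0 stack; and allocm reaps sched.freem accordingly. A simplified sketch of that reaping decision (the helper name is hypothetical, not a runtime API):

package main

import (
	"fmt"
	"sync/atomic"
)

// Simplified mirror of the m.freeWait states (values copied from the diff).
const (
	freeMStack = 0 // M done, free stack and reference.
	freeMRef   = 1 // M done, free reference.
	freeMWait  = 2 // M still in use.
)

// reapOne models the decision allocm makes for a single sched.freem entry:
// whether the entry may be dropped from the list, and whether its g0 stack
// must be freed first.
func reapOne(freeWait *atomic.Uint32) (drop, freeStack bool) {
	switch freeWait.Load() {
	case freeMWait:
		return false, false // still in use: keep the reference alive
	case freeMStack:
		return true, true // done; the Go-allocated g0 stack must be freed
	default: // freeMRef
		return true, false // done; the OS owns the stack, just drop the reference
	}
}

func main() {
	var w atomic.Uint32

	w.Store(freeMWait) // set by mexit while the M is still running
	fmt.Println(reapOne(&w)) // false false

	w.Store(freeMRef) // OS-stack case: mexit's last store before returning
	fmt.Println(reapOne(&w)) // true false

	w.Store(freeMStack) // Go-stack case: stored by exitThread once off the stack
	fmt.Println(reapOne(&w)) // true true
}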
@@ -6,7 +6,10 @@
 
 package runtime
 
-import "unsafe"
+import (
+	"runtime/internal/atomic"
+	"unsafe"
+)
 
 // read calls the read system call.
 // It returns a non-negative number of bytes written or a negative errno value.
@@ -34,8 +37,8 @@ func open(name *byte, mode, perm int32) int32
 // return value is only set on linux to be used in osinit()
 func madvise(addr unsafe.Pointer, n uintptr, flags int32) int32
 
-// exitThread terminates the current thread, writing *wait = 0 when
+// exitThread terminates the current thread, writing *wait = freeMStack when
 // the stack is safe to reclaim.
 //
 //go:noescape
-func exitThread(wait *uint32)
+func exitThread(wait *atomic.Uint32)
@@ -6,6 +6,7 @@ package runtime
 
 import (
 	"internal/abi"
+	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -474,7 +475,7 @@ func pthread_cond_signal(c *pthreadcond) int32 {
 func pthread_cond_signal_trampoline()
 
 // Not used on Darwin, but must be defined.
-func exitThread(wait *uint32) {
+func exitThread(wait *atomic.Uint32) {
 }
 
 //go:nosplit
@@ -65,7 +65,7 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
 	MOVL $0xf1, 0xf1 // crash
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-8
 	MOVQ wait+0(FP), AX
 	// We're done using the stack.
@@ -99,7 +99,7 @@ GLOBL exitStack<>(SB),RODATA,$8
 DATA exitStack<>+0x00(SB)/4, $0
 DATA exitStack<>+0x04(SB)/4, $0
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-4
 	MOVL wait+0(FP), AX
 	// We're done using the stack.
@@ -96,7 +96,7 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
 	MOVL $0xf1, 0xf1 // crash
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-8
 	MOVQ wait+0(FP), AX
 	// We're done using the stack.
@@ -85,7 +85,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0
 	MOVW.CS R8, (R8)
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-4
 	MOVW wait+0(FP), R0
 	// We're done using the stack.
@@ -99,7 +99,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
 	MOVD $0, R0
 	MOVD R0, (R0)
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
 	MOVD wait+0(FP), R0
 	// We're done using the stack.
@@ -96,7 +96,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
 	ECALL
 	WORD $0 // crash
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
 	MOV wait+0(FP), A0
 	// We're done using the stack.
@@ -72,7 +72,7 @@ TEXT exit1<>(SB),NOSPLIT,$0
 	INT $3 // not reached
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-4
 	MOVL wait+0(FP), AX
 	// We're done using the stack.
@@ -54,7 +54,7 @@ TEXT runtime·exit(SB),NOSPLIT,$0-4
 	SYSCALL
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-8
 	MOVQ wait+0(FP), AX
 	// We're done using the stack.
@@ -117,7 +117,7 @@ TEXT exit1<>(SB),NOSPLIT|NOFRAME,$0
 	MOVW $1003, R1
 	MOVW R0, (R1) // fail hard
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-4
 	MOVW wait+0(FP), R0
 	// We're done using the stack.
@@ -56,7 +56,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
 	SVC
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
 	MOVD wait+0(FP), R0
 	// We're done using the stack.
@@ -49,7 +49,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
 	SYSCALL
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
 	MOVV wait+0(FP), R19
 	// We're done using the stack.
@@ -51,7 +51,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
 	SYSCALL
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
 	MOVV wait+0(FP), R1
 	// We're done using the stack.
@@ -50,7 +50,7 @@ TEXT runtime·exit(SB),NOSPLIT,$0-4
 	UNDEF
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-4
 	MOVW wait+0(FP), R1
 	// We're done using the stack.
@@ -49,7 +49,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
 	SYSCALL $SYS_exit_group
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
 	MOVD wait+0(FP), R1
 	// We're done using the stack.
@@ -57,7 +57,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
 	ECALL
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
 	MOV wait+0(FP), A0
 	// We're done using the stack.
@@ -46,7 +46,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0-4
 	SYSCALL
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT|NOFRAME,$0-8
 	MOVD wait+0(FP), R1
 	// We're done using the stack.
@@ -53,7 +53,7 @@ TEXT runtime·exit(SB),NOSPLIT,$-4
 	MOVL $0xf1, 0xf1 // crash
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-4
 	MOVL wait+0(FP), AX
 	// We're done using the stack.
@@ -122,7 +122,7 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
 	MOVL $0xf1, 0xf1 // crash
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-8
 	MOVQ wait+0(FP), AX
 	// We're done using the stack.
@@ -56,7 +56,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0
 	MOVW.CS R8, (R8)
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-4
 	MOVW wait+0(FP), R0
 	// We're done using the stack.
@@ -115,7 +115,7 @@ TEXT runtime·exit(SB),NOSPLIT,$-8
 	MOVD $0, R0 // If we're still running,
 	MOVD R0, (R0) // crash
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0-8
 	MOVD wait+0(FP), R0
 	// We're done using the stack.
@@ -8,6 +8,7 @@ package runtime
 
 import (
 	"internal/abi"
+	"runtime/internal/atomic"
 	"unsafe"
 )
@@ -248,7 +249,7 @@ func sigaltstack(new *stackt, old *stackt) {
 func sigaltstack_trampoline()
 
 // Not used on OpenBSD, but must be defined.
-func exitThread(wait *uint32) {
+func exitThread(wait *atomic.Uint32) {
 }
 
 //go:nosplit
@@ -24,7 +24,7 @@ TEXT runtime·exit(SB),NOSPLIT|NOFRAME,$0
 	MOVV R2, (R2)
 	RET
 
-// func exitThread(wait *uint32)
+// func exitThread(wait *atomic.Uint32)
 TEXT runtime·exitThread(SB),NOSPLIT,$0
 	MOVV wait+0(FP), R4 // arg 1 - notdead
 	MOVV $302, R2 // sys___threxit