
runtime: don't free stack spans during GC

Memory for stacks is manually managed by the runtime and, currently
(with one exception), we free stack spans immediately when the last
stack on a span is freed. However, the garbage collector assumes that
spans can never transition from non-free to free during scan or mark.
This disagreement makes it possible for the garbage collector to mark
uninitialized objects and is blocking us from re-enabling the bad
pointer test in the garbage collector (issue #9880).

For example, the following sequence will result in marking an
uninitialized object:

1. scanobject loads a pointer slot out of the object it's scanning.
   This happens to be one of the special pointers from the heap into a
   stack. Call the pointer p and suppose it points into X's stack.

2. X, running on another thread, grows its stack and frees its old
   stack.

3. The old stack happens to be large or was the last stack in its
   span, so X frees this span, setting it to state _MSpanFree.

4. The span gets reused as a heap span.

5. scanobject calls heapBitsForObject, which loads the span containing
   p, which is now in state _MSpanInUse, but doesn't necessarily have
   an object at p. The not-object at p gets marked, and at this point
   all sorts of things can go wrong.
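
As an aside, purely for illustration (toy types and names, not the
runtime's), the state changes in steps 1-5 can be modeled like this; the
final check is the one that goes wrong:

    package main

    import "fmt"

    // Simplified stand-ins for the runtime's span states.
    const (
        spanFree = iota
        spanInUse
        spanStack
    )

    // span is a toy mspan: just a state.
    type span struct{ state int }

    // looksLikeHeapObject models the check heapBitsForObject relies on: a
    // span observed in the in-use state is assumed to hold a real object.
    func looksLikeHeapObject(s *span) bool { return s.state == spanInUse }

    func main() {
        s := &span{state: spanStack} // step 1: p points into X's stack span
        s.state = spanFree           // steps 2-3: X frees its old stack; span freed
        s.state = spanInUse          // step 4: span reused as a heap span
        // Step 5: the check passes even though nothing was allocated at p,
        // so the scanner would mark a not-object.
        fmt.Println("would mark p:", looksLikeHeapObject(s))
    }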

We already have a partial solution to this. When shrinking a stack, we
put the old stack on a queue to be freed at the end of garbage
collection. This was done to address exactly this problem, but wasn't
a complete solution.

This commit generalizes this solution to both shrinking and growing
stacks. For stacks that fit in the stack pool, we simply don't free
the span, even if its reference count reaches zero. It's fine to reuse
the span for other stacks, and keeping it in the pool enables that. At the end of GC, we
sweep for cached stack spans with a zero reference count and free
them. For larger stacks, we simply queue the stack span to be freed at
the end of GC. Ideally, we would reuse these large stack spans the way
we can reuse small stack spans, but that's a more invasive change that will
have to wait until after the freeze.
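
A condensed sketch of the scheme, with made-up names (the real change is in
the diff below): while GC is running, a small-stack span whose reference
count hits zero stays cached in the pool, a large stack span goes on a
queue, and both are released once GC finishes.

    package main

    import "fmt"

    // Toy model of the delayed-free scheme; names are illustrative, not the
    // runtime's real API.
    type span struct{ ref int }

    var (
        gcRunning bool
        pool      []*span // spans cached for small stacks
        freeQueue []*span // large stack spans queued until GC finishes
    )

    // releaseStackSpan models freeing the last stack on span s.
    func releaseStackSpan(s *span, fitsInPool bool) {
        s.ref--
        if s.ref != 0 {
            return // other stacks still live on this span
        }
        switch {
        case !gcRunning:
            fmt.Println("sweeping: return span to the heap immediately")
        case fitsInPool:
            fmt.Println("GC running: keep empty span cached in the pool for reuse")
        default:
            fmt.Println("GC running: queue large stack span")
            freeQueue = append(freeQueue, s)
        }
    }

    // endOfGC models freeStackSpans: sweep the pool for spans whose reference
    // count dropped to zero, then release everything that was queued.
    func endOfGC() {
        for _, s := range pool {
            if s.ref == 0 {
                fmt.Println("end of GC: free cached pool span")
            }
        }
        for range freeQueue {
            fmt.Println("end of GC: free queued large stack span")
        }
        freeQueue = nil
        gcRunning = false
    }

    func main() {
        gcRunning = true

        small := &span{ref: 1}
        pool = append(pool, small)
        releaseStackSpan(small, true) // last small stack freed: span stays cached

        large := &span{ref: 1}
        releaseStackSpan(large, false) // large stack freed: span queued

        endOfGC() // cached empty spans and queued spans are freed here
    }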

Fixes #11267.

Change-Id: Ib7f2c5da4845cc0268e8dc098b08465116972a71
Reviewed-on: https://go-review.googlesource.com/11502
Reviewed-by: Russ Cox <rsc@golang.org>
Austin Clements 2015-06-22 10:24:50 -04:00
parent f73b2fca84
commit d57056ba26
3 changed files with 79 additions and 34 deletions


@@ -1386,7 +1386,11 @@ func gcMark(start_time int64) {
traceGCScanDone()
}
shrinkfinish()
// TODO(austin): This doesn't have to be done during STW, as
// long as we block the next GC cycle until this is done. Move
// it after we start the world, but before dropping worldsema.
// (See issue #11465.)
freeStackSpans()
cachestats()


@@ -75,6 +75,20 @@ var mheap_ mheap
// either one of the MHeap's free lists or one of the
// MCentral's span lists. We use empty MSpan structures as list heads.
// An MSpan representing actual memory has state _MSpanInUse,
// _MSpanStack, or _MSpanFree. Transitions between these states are
// constrained as follows:
//
// * A span may transition from free to in-use or stack during any GC
// phase.
//
// * During sweeping (gcphase == _GCoff), a span may transition from
// in-use to free (as a result of sweeping) or stack to free (as a
// result of stacks being freed).
//
// * During GC (gcphase != _GCoff), a span *must not* transition from
// stack or in-use to free. Because concurrent GC may read a pointer
// and then look up its span, the span state must be monotonic.
const (
_MSpanInUse = iota // allocated for garbage collected heap
_MSpanStack // allocated for use by stack allocator
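
For illustration only, the transition rules documented in the comment above
can be encoded as a small predicate (toy constants, not runtime code):

    package main

    import "fmt"

    // Toy copies of the span states described above.
    const (
        mSpanInUse = iota // allocated for the garbage-collected heap
        mSpanStack        // allocated for use by the stack allocator
        mSpanFree
    )

    // transitionAllowed encodes the rules from the comment: free -> in-use or
    // stack is allowed in any phase, but in-use or stack -> free is allowed
    // only while sweeping (gcphase == _GCoff), so span state is monotonic
    // while GC is running.
    func transitionAllowed(from, to int, gcRunning bool) bool {
        switch {
        case from == mSpanFree && (to == mSpanInUse || to == mSpanStack):
            return true
        case (from == mSpanInUse || from == mSpanStack) && to == mSpanFree:
            return !gcRunning
        }
        return false
    }

    func main() {
        fmt.Println(transitionAllowed(mSpanFree, mSpanStack, true))  // true: allowed in any phase
        fmt.Println(transitionAllowed(mSpanStack, mSpanFree, true))  // false: forbidden during GC
        fmt.Println(transitionAllowed(mSpanStack, mSpanFree, false)) // true: allowed while sweeping
    }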


@@ -44,7 +44,9 @@ const (
var stackpool [_NumStackOrders]mspan
var stackpoolmu mutex
var stackfreequeue stack
// List of stack spans to be freed at the end of GC. Protected by
// stackpoolmu.
var stackFreeQueue mspan
// Cached value of haveexperiment("framepointer")
var framepointer_enabled bool
@@ -56,6 +58,7 @@ func stackinit() {
for i := range stackpool {
mSpanList_Init(&stackpool[i])
}
mSpanList_Init(&stackFreeQueue)
}
// Allocates a stack from the free pool. Must be called with
@@ -108,8 +111,22 @@ func stackpoolfree(x gclinkptr, order uint8) {
x.ptr().next = s.freelist
s.freelist = x
s.ref--
if s.ref == 0 {
// span is completely free - return to heap
if gcphase == _GCoff && s.ref == 0 {
// Span is completely free. Return it to the heap
// immediately if we're sweeping.
//
// If GC is active, we delay the free until the end of
// GC to avoid the following type of situation:
//
// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
// 2) The stack that pointer points to is copied
// 3) The old stack is freed
// 4) The containing span is marked free
// 5) GC attempts to mark the SudoG.elem pointer. The
// marking fails because the pointer looks like a
// pointer into a free span.
//
// By not freeing, we prevent step #4 until GC is done.
mSpanList_Remove(s)
s.freelist = 0
mHeap_FreeStack(&mheap_, s)
@@ -302,7 +319,21 @@ func stackfree(stk stack, n uintptr) {
println(hex(s.start<<_PageShift), v)
throw("bad span state")
}
mHeap_FreeStack(&mheap_, s)
if gcphase == _GCoff {
// Free the stack immediately if we're
// sweeping.
mHeap_FreeStack(&mheap_, s)
} else {
// Otherwise, add it to a list of stack spans
// to be freed at the end of GC.
//
// TODO(austin): Make it possible to re-use
// these spans as stacks, like we do for small
// stack spans. (See issue #11466.)
lock(&stackpoolmu)
mSpanList_Insert(&stackFreeQueue, s)
unlock(&stackpoolmu)
}
}
}
@@ -613,25 +644,7 @@ func copystack(gp *g, newsize uintptr) {
if stackPoisonCopy != 0 {
fillstack(old, 0xfc)
}
if newsize > oldsize {
// growing, free stack immediately
stackfree(old, oldsize)
} else {
// shrinking, queue up free operation. We can't actually free the stack
// just yet because we might run into the following situation:
// 1) GC starts, scans a SudoG but does not yet mark the SudoG.elem pointer
// 2) The stack that pointer points to is shrunk
// 3) The old stack is freed
// 4) The containing span is marked free
// 5) GC attempts to mark the SudoG.elem pointer. The marking fails because
// the pointer looks like a pointer into a free span.
// By not freeing, we prevent step #4 until GC is done.
lock(&stackpoolmu)
*(*stack)(unsafe.Pointer(old.lo)) = stackfreequeue
*(*uintptr)(unsafe.Pointer(old.lo + ptrSize)) = oldsize
stackfreequeue = old
unlock(&stackpoolmu)
}
stackfree(old, oldsize)
}
// round x up to a power of 2.
@@ -868,18 +881,32 @@ func shrinkstack(gp *g) {
casgstatus(gp, _Gcopystack, oldstatus)
}
// Do any delayed stack freeing that was queued up during GC.
func shrinkfinish() {
// freeStackSpans frees unused stack spans at the end of GC.
func freeStackSpans() {
lock(&stackpoolmu)
s := stackfreequeue
stackfreequeue = stack{}
unlock(&stackpoolmu)
for s.lo != 0 {
t := *(*stack)(unsafe.Pointer(s.lo))
n := *(*uintptr)(unsafe.Pointer(s.lo + ptrSize))
stackfree(s, n)
s = t
// Scan stack pools for empty stack spans.
for order := range stackpool {
list := &stackpool[order]
for s := list.next; s != list; {
next := s.next
if s.ref == 0 {
mSpanList_Remove(s)
s.freelist = 0
mHeap_FreeStack(&mheap_, s)
}
s = next
}
}
// Free queued stack spans.
for stackFreeQueue.next != &stackFreeQueue {
s := stackFreeQueue.next
mSpanList_Remove(s)
mHeap_FreeStack(&mheap_, s)
}
unlock(&stackpoolmu)
}
//go:nosplit