runtime: recycle large stack spans

To prevent races with the garbage collector, stack spans cannot be
reused as heap spans during a GC. We deal with this by caching stack
spans during GC and releasing them at the end of mark termination.
However, while our cache lets us reuse small stack spans, currently
large stack spans are *not* reused. This can cause significant memory
growth in programs that allocate large stacks rapidly, but grow the
heap slowly (such as in issue #13552).

Fix this by adding logic to reuse large stack spans for other stacks.

Fixes #11466.

Fixes #13552. Without this change, the program in this issue creeps to
over 1GB of memory over the course of a few hours. With this change,
it stays rock solid at around 30MB.
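
For illustration, a minimal standalone sketch of the scheme, using toy stand-ins (span, large, freeLarge, and allocLarge are invented for this example) rather than the runtime's mspan and mSpanList. Large stacks are cached in per-size free lists indexed by log2 of the span's page count; since stack sizes are powers of two, each bucket holds spans of exactly one size:

package main

import "fmt"

// span is a toy stand-in for the runtime's mspan; only the page
// count and list linkage matter here.
type span struct {
	npages uintptr
	next   *span
}

// stacklog2 returns floor(log2(n)), mirroring the helper added by
// this change.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

// large plays the role of stackLarge: one free list per
// log2(npages) bucket.
var large [16]*span

// freeLarge caches a large stack span instead of returning it to
// the heap, as stackfree now does while GC is running.
func freeLarge(s *span) {
	i := stacklog2(s.npages)
	s.next = large[i]
	large[i] = s
}

// allocLarge reuses a cached span of the requested size class if
// one is available, falling back to a fresh allocation otherwise.
func allocLarge(npages uintptr) *span {
	i := stacklog2(npages)
	if s := large[i]; s != nil {
		large[i] = s.next
		s.next = nil
		return s // reused: the heap does not grow
	}
	return &span{npages: npages} // cache miss: allocate anew
}

func main() {
	s := allocLarge(8)  // fresh 8-page span
	freeLarge(s)        // cached in bucket stacklog2(8) == 3
	t := allocLarge(8)  // comes back from the cache
	fmt.Println(s == t) // prints true
}

In the runtime itself the cache is only a buffer for the duration of a GC: stackfree adds to it while GC is running, stackalloc draws from it, and freeStackSpans returns anything left over to the heap at the end of mark termination.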

Change-Id: If8b2d85464aa80c96230a1990715e39aa803904f
Reviewed-on: https://go-review.googlesource.com/17814
Reviewed-by: Keith Randall <khr@golang.org>
commit 0cbf8d13a7
parent 9917165083
Author: Austin Clements
Date:   2015-12-14 14:30:25 -05:00

@@ -149,9 +149,11 @@ const (
 var stackpool [_NumStackOrders]mSpanList
 var stackpoolmu mutex
 
-// List of stack spans to be freed at the end of GC. Protected by
-// stackpoolmu.
-var stackFreeQueue mSpanList
+// Global pool of large stack spans.
+var stackLarge struct {
+	lock mutex
+	free [_MHeapMap_Bits]mSpanList // free lists by log_2(s.npages)
+}
 
 // Cached value of haveexperiment("framepointer")
 var framepointer_enabled bool
@@ -163,7 +165,19 @@ func stackinit() {
 	for i := range stackpool {
 		stackpool[i].init()
 	}
-	stackFreeQueue.init()
+	for i := range stackLarge.free {
+		stackLarge.free[i].init()
+	}
 }
 
+// stacklog2 returns ⌊log_2(n)⌋.
+func stacklog2(n uintptr) int {
+	log2 := 0
+	for n > 1 {
+		n >>= 1
+		log2++
+	}
+	return log2
+}
+
 // Allocates a stack from the free pool. Must be called with
@@ -358,9 +372,24 @@ func stackalloc(n uint32) (stack, []stkbar) {
 		}
 		v = unsafe.Pointer(x)
 	} else {
-		s := mheap_.allocStack(round(uintptr(n), _PageSize) >> _PageShift)
+		var s *mspan
+		npage := uintptr(n) >> _PageShift
+		log2npage := stacklog2(npage)
+
+		// Try to get a stack from the large stack cache.
+		lock(&stackLarge.lock)
+		if !stackLarge.free[log2npage].isEmpty() {
+			s = stackLarge.free[log2npage].first
+			stackLarge.free[log2npage].remove(s)
+		}
+		unlock(&stackLarge.lock)
+
 		if s == nil {
-			throw("out of memory")
+			// Allocate a new stack from the heap.
+			s = mheap_.allocStack(npage)
+			if s == nil {
+				throw("out of memory")
+			}
 		}
 		v = unsafe.Pointer(s.start << _PageShift)
 	}
@@ -435,15 +464,15 @@ func stackfree(stk stack, n uintptr) {
 			// sweeping.
 			mheap_.freeStack(s)
 		} else {
-			// Otherwise, add it to a list of stack spans
-			// to be freed at the end of GC.
-			//
-			// TODO(austin): Make it possible to re-use
-			// these spans as stacks, like we do for small
-			// stack spans. (See issue #11466.)
-			lock(&stackpoolmu)
-			stackFreeQueue.insert(s)
-			unlock(&stackpoolmu)
+			// If the GC is running, we can't return a
+			// stack span to the heap because it could be
+			// reused as a heap span, and this state
+			// change would race with GC. Add it to the
+			// large stack cache instead.
+			log2npage := stacklog2(s.npages)
+			lock(&stackLarge.lock)
+			stackLarge.free[log2npage].insert(s)
+			unlock(&stackLarge.lock)
 		}
 	}
 }
@@ -1016,14 +1045,19 @@ func freeStackSpans() {
 		}
 	}
 
-	// Free queued stack spans.
-	for !stackFreeQueue.isEmpty() {
-		s := stackFreeQueue.first
-		stackFreeQueue.remove(s)
-		mheap_.freeStack(s)
-	}
-
 	unlock(&stackpoolmu)
+
+	// Free large stack spans.
+	lock(&stackLarge.lock)
+	for i := range stackLarge.free {
+		for s := stackLarge.free[i].first; s != nil; {
+			next := s.next
+			stackLarge.free[i].remove(s)
+			mheap_.freeStack(s)
+			s = next
+		}
+	}
+	unlock(&stackLarge.lock)
 }
 
 //go:nosplit
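
As a quick, illustrative check of the bucketing above: stackalloc rejects sizes that are not powers of two, so every page count this path handles maps to its own free list, and a cached span is always exactly the size requested. A tiny program (stacklog2 copied verbatim from the change):

package main

import "fmt"

// stacklog2 returns floor(log2(n)); identical to the helper above.
func stacklog2(n uintptr) int {
	log2 := 0
	for n > 1 {
		n >>= 1
		log2++
	}
	return log2
}

func main() {
	// Power-of-two page counts each land in their own bucket:
	// 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4.
	for _, npages := range []uintptr{1, 2, 4, 8, 16} {
		fmt.Printf("%2d pages -> stackLarge.free[%d]\n", npages, stacklog2(npages))
	}
}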