
runtime: accept non-monotonic arena allocation on 32-bit

Currently, the heap arena allocator allocates monotonically increasing
addresses. This is fine on 64-bit, where we stake out a giant block of
the address space for ourselves and start at the beginning of it. On
32-bit, however, the arena starts at address 0 and we start allocating
from wherever the OS feels like giving us memory. We can generally hint
the OS to start us at a low address, but this doesn't always work.

As a result, on 32-bit, if the OS gives us an arena block that's lower
than the current block we're allocating from, we simply say "thanks
but no thanks", return the whole (256MB!) block of memory, and then
take a fallback path that mmaps just the amount of memory we need
(which may be as little as 8K).

We have to do this because mheap_.arena_used is *both* the highest
used address in the arena and the next address we allocate from.

Fix all of this by separating the second role of arena_used out into a
new field called arena_alloc. This lets us accept any arena block the
OS gives us. This also slightly changes the invariants around
arena_end. Previously, we ensured arena_used <= arena_end, but this
was related to arena_used's second role, so the new invariant is
arena_alloc <= arena_end. As a result, we no longer necessarily update
arena_end when we're updating arena_used.
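
To make the two-cursor scheme concrete, here is a small, self-contained
Go sketch of a linear arena allocator with separate "used" and "alloc"
cursors. It is illustrative only: the toyArena type and its methods are
made up for this example and are not the runtime's code; only the
invariants mirror the change described above.

package main

import "fmt"

// toyArena is an illustrative model, not the runtime's mheap. alloc is
// the linear-allocation cursor into [alloc, end); used only tracks the
// high-water mark of addresses handed out so far.
type toyArena struct {
	used  uintptr // one past the highest address returned so far
	alloc uintptr // next address to hand out
	end   uintptr // end of the current reserved block
}

// acceptBlock switches to a new reserved block [p, p+size). Because the
// allocation cursor is separate from used, p may be below the previous
// block; nothing has to be handed back.
func (a *toyArena) acceptBlock(p, size uintptr) {
	a.alloc = p
	a.end = p + size
}

// take hands out n bytes from the current block, maintaining
// alloc <= end and advancing used only when alloc passes it.
func (a *toyArena) take(n uintptr) (uintptr, bool) {
	if n > a.end-a.alloc {
		return 0, false
	}
	p := a.alloc
	a.alloc += n
	if a.alloc > a.used {
		a.used = a.alloc
	}
	return p, true
}

func main() {
	a := new(toyArena)
	a.acceptBlock(0x40000000, 1<<20)
	p1, _ := a.take(8 << 10)

	// The OS hands us a lower block: alloc moves backward, used stays
	// at the high-water mark, and the block is fully usable.
	a.acceptBlock(0x10000000, 1<<20)
	p2, _ := a.take(8 << 10)

	fmt.Printf("p1=%#x p2=%#x used=%#x alloc=%#x\n", p1, p2, a.used, a.alloc)
}

With a single cursor playing both roles (the old arena_used), accepting
the lower block would mean moving that cursor backward, which is exactly
why the old code handed the block back instead.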

Fixes #20259 properly. (Unlike the original fix, this one should not
be cherry-picked to Go 1.8.)

This is reasonably low risk. I verified several key properties of the
32-bit code path with both 4K and 64K physical pages using a symbolic
model, and the change does not materially affect 64-bit (arena_used ==
arena_alloc on 64-bit). The only oddity is that we no longer call
setArenaUsed with racemap == false to indicate that we're creating a
hole in the address space, but this only happened in a 32-bit-only
code path, and the race detector requires 64-bit, so this never
mattered anyway.
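
The symbolic model is not part of the change itself, but the flavor of
the check can be suggested with a tiny brute-force sketch (illustrative,
and not the author's actual model): enumerate block placements and page
sizes over a miniature address space and assert that the allocation
cursor never passes the end of its block and that the high-water mark
never moves backward.

package main

import "fmt"

func main() {
	// Miniature stand-ins: a 64-unit address space, 16-unit arena
	// blocks, and "page sizes" of 4 and 8 standing in for 4K and 64K.
	const blockSize = 16
	for _, pageSize := range []uintptr{4, 8} {
		for first := uintptr(0); first < 64; first += blockSize {
			for second := uintptr(0); second < 64; second += blockSize {
				var used, alloc, end uintptr

				accept := func(p uintptr) {
					alloc = (p + pageSize - 1) &^ (pageSize - 1) // page-align
					end = p + blockSize
				}
				take := func(n uintptr) {
					if n > end-alloc {
						return
					}
					alloc += n
					if alloc > used {
						used = alloc
					}
				}
				check := func(prevUsed uintptr) {
					if alloc > end || used < prevUsed {
						panic(fmt.Sprintf("invariant violated: page=%d first=%#x second=%#x",
							pageSize, first, second))
					}
				}

				accept(first)
				take(pageSize)
				check(0)
				prev := used
				accept(second) // may be below the first block
				take(pageSize)
				check(prev)
			}
		}
	}
	fmt.Println("all placements OK")
}

A real model would also have to cover rounding at _PageSize, overlapping
reservations, and the metadata mapping tied to arena_used; this sketch
only shows the shape of the invariants involved.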

Change-Id: Ib1334007933e615166bac4159bf357ae06ec6a25
Reviewed-on: https://go-review.googlesource.com/44010
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Austin Clements committed 2017-05-23 17:07:26 -04:00
parent a9d8d4df61
commit 13ae3b3a8d
2 changed files with 61 additions and 29 deletions

src/runtime/malloc.go

@@ -380,6 +380,7 @@ func mallocinit() {
 	}
 	mheap_.arena_end = p + pSize
 	mheap_.arena_used = p1
+	mheap_.arena_alloc = p1
 	mheap_.arena_reserved = reserved

 	if mheap_.arena_start&(_PageSize-1) != 0 {
@@ -398,7 +399,13 @@ func mallocinit() {
 // h.arena_start and h.arena_end. sysAlloc returns nil on failure.
 // There is no corresponding free function.
 func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
-	if n > h.arena_end-h.arena_used {
+	// strandLimit is the maximum number of bytes to strand from
+	// the current arena block. If we would need to strand more
+	// than this, we fall back to sysAlloc'ing just enough for
+	// this allocation.
+	const strandLimit = 16 << 20
+
+	if n > h.arena_end-h.arena_alloc {
 		// If we haven't grown the arena to _MaxMem yet, try
 		// to reserve some more address space.
 		p_size := round(n+_PageSize, 256<<20)
@@ -414,48 +421,54 @@ func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
 			// p can be just about anywhere in the address
 			// space, including before arena_end.
 			if p == h.arena_end {
-				// The new reservation is contiguous
-				// with the old reservation.
+				// The new block is contiguous with
+				// the current block. Extend the
+				// current arena block.
 				h.arena_end = new_end
 				h.arena_reserved = reserved
-			} else if h.arena_end < p && p+p_size-h.arena_start-1 <= _MaxMem {
+			} else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxMem && h.arena_end-h.arena_alloc < strandLimit {
 				// We were able to reserve more memory
 				// within the arena space, but it's
 				// not contiguous with our previous
-				// reservation. Skip over the unused
-				// address space.
+				// reservation. It could be before or
+				// after our current arena_used.
 				//
 				// Keep everything page-aligned.
 				// Our pages are bigger than hardware pages.
 				h.arena_end = p + p_size
-				used := p + (-p & (_PageSize - 1))
-				h.setArenaUsed(used, false)
+				p = round(p, _PageSize)
+				h.arena_alloc = p
 				h.arena_reserved = reserved
 			} else {
-				// We got a mapping, but it's not
-				// linear with our current arena, so
-				// we can't use it.
+				// We got a mapping, but either
+				//
+				// 1) It's not in the arena, so we
+				// can't use it. (This should never
+				// happen on 32-bit.)
+				//
+				// 2) We would need to discard too
+				// much of our current arena block to
+				// use it.
 				//
-				// TODO: Make it possible to allocate
-				// from this. We can't decrease
-				// arena_used, but we could introduce
-				// a new variable for the current
-				// allocation position.
 				// We haven't added this allocation to
 				// the stats, so subtract it from a
 				// fake stat (but avoid underflow).
+				//
+				// We'll fall back to a small sysAlloc.
 				stat := uint64(p_size)
 				sysFree(unsafe.Pointer(p), p_size, &stat)
 			}
 		}
 	}

-	if n <= h.arena_end-h.arena_used {
+	if n <= h.arena_end-h.arena_alloc {
 		// Keep taking from our reservation.
-		p := h.arena_used
+		p := h.arena_alloc
 		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
-		h.setArenaUsed(p+n, true)
+		h.arena_alloc += n
+		if h.arena_alloc > h.arena_used {
+			h.setArenaUsed(h.arena_alloc, true)
+		}

 		if p&(_PageSize-1) != 0 {
 			throw("misrounded allocation in MHeap_SysAlloc")
@@ -485,13 +498,9 @@ func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
 		return nil
 	}

-	p_end := p + p_size
 	p += -p & (_PageSize - 1)
 	if p+n > h.arena_used {
 		h.setArenaUsed(p+n, true)
-		if p_end > h.arena_end {
-			h.arena_end = p_end
-		}
 	}

 	if p&(_PageSize-1) != 0 {

src/runtime/mheap.go

@@ -108,13 +108,36 @@ type mheap struct {
 	nsmallfree [_NumSizeClasses]uint64 // number of frees for small objects (<=maxsmallsize)

 	// range of addresses we might see in the heap
 	bitmap        uintptr // Points to one byte past the end of the bitmap
 	bitmap_mapped uintptr
-	arena_start   uintptr
-	arena_used    uintptr // One byte past usable heap arena. Set with setArenaUsed.
-	arena_end     uintptr
+
+	// The arena_* fields indicate the addresses of the Go heap.
+	//
+	// The maximum range of the Go heap is
+	// [arena_start, arena_start+_MaxMem+1).
+	//
+	// The range of the current Go heap is
+	// [arena_start, arena_used). Parts of this range may not be
+	// mapped, but the metadata structures are always mapped for
+	// the full range.
+	arena_start uintptr
+	arena_used  uintptr // Set with setArenaUsed.
+
+	// The heap is grown using a linear allocator that allocates
+	// from the block [arena_alloc, arena_end). arena_alloc is
+	// often, but *not always* equal to arena_used.
+	arena_alloc uintptr
+	arena_end   uintptr
+
+	// arena_reserved indicates that the memory [arena_alloc,
+	// arena_end) is reserved (e.g., mapped PROT_NONE). If this is
+	// false, we have to be careful not to clobber existing
+	// mappings here. If this is true, then we own the mapping
+	// here and *must* clobber it to use it.
 	arena_reserved bool

+	_ uint32 // ensure 64-bit alignment
+
 	// central free lists for small size classes.
 	// the padding makes sure that the MCentrals are
 	// spaced CacheLineSize bytes apart, so that each MCentral.lock
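
Read together, the new field comments pin down a few simple relationships
among the arena_* fields. The following standalone sketch paraphrases them
as a predicate over made-up values; it is not runtime code, and _MaxMem is
replaced by an arbitrary stand-in.

package main

import "fmt"

// arenaFields mirrors the documented arena_* relationships from the
// comments above; it is an illustrative stand-in, not runtime.mheap.
type arenaFields struct {
	start, used, alloc, end uintptr
	maxMem                  uintptr // stand-in for _MaxMem
}

// consistent reports whether the documented ranges hold: the current
// heap is [start, used) within the maximum range [start, start+maxMem+1),
// and the linear allocator draws from [alloc, end), with alloc <= end.
func (a arenaFields) consistent() bool {
	return a.start <= a.used &&
		a.used <= a.start+a.maxMem+1 &&
		a.alloc <= a.end
}

func main() {
	// On 32-bit, arena_alloc may legitimately sit below arena_used
	// after a lower block is accepted; the predicate still holds.
	a := arenaFields{
		start:  0,
		used:   0x20000000,
		alloc:  0x08000000,
		end:    0x10000000,
		maxMem: 1 << 31,
	}
	fmt.Println(a.consistent()) // true
}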