
runtime: wait to update arena_used until after mapping bitmap

This avoids a race with gcmarkwb_m that was leading to faults.

Fixes #10212.

Change-Id: I6fcf8d09f2692227063ce29152cb57366ea22487
Reviewed-on: https://go-review.googlesource.com/10816
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
Russ Cox authored 2015-06-07 22:59:29 -04:00, committed by Austin Clements
parent a788c913fa
commit d57c889ae8
3 changed files with 24 additions and 11 deletions
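
The change enforces an ordering: map the bitmap and spans memory for the new limit first, and only afterwards publish the new arena_used, so a thread that observes the larger arena_used always finds the corresponding bitmap already mapped. As a rough, hedged sketch of that pattern in standalone Go — the names (sketch, heap, mapAuxUpTo, grow) are invented for illustration and are not the runtime's code:

// Hedged sketch of the publish-after-map ordering; hypothetical names,
// not the real runtime code.
package sketch

import "sync/atomic"

type heap struct {
	used uintptr // published high-water mark, read by concurrent threads
}

// mapAuxUpTo stands in for mHeap_MapBits/mHeap_MapSpans: after it returns,
// the auxiliary structures are assumed valid for all addresses below newUsed.
func mapAuxUpTo(newUsed uintptr) {
	// In the runtime this would map the bitmap/spans pages covering the
	// newly used range; here it is only a placeholder.
}

// grow extends the heap by n bytes using the publish-after-map ordering.
func (h *heap) grow(n uintptr) {
	newUsed := atomic.LoadUintptr(&h.used) + n

	// Map first: any reader that later observes newUsed must already find
	// the bitmap/spans memory mapped.
	mapAuxUpTo(newUsed)

	// Only then publish the new limit.
	atomic.StoreUintptr(&h.used, newUsed)
}

In the diff below, the same reordering appears at both call sites in mHeap_SysAlloc: mHeap_MapBits and mHeap_MapSpans now take the prospective limit p+n, and h.arena_used is assigned only after they return.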


@@ -418,9 +418,9 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 		// Keep taking from our reservation.
 		p := h.arena_used
 		sysMap((unsafe.Pointer)(p), n, h.arena_reserved, &memstats.heap_sys)
-		h.arena_used += n
-		mHeap_MapBits(h)
-		mHeap_MapSpans(h)
+		mHeap_MapBits(h, p+n)
+		mHeap_MapSpans(h, p+n)
+		h.arena_used = p+n
 		if raceenabled {
 			racemapshadow((unsafe.Pointer)(p), n)
 		}
@@ -454,12 +454,12 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 	p_end := p + p_size
 	p += -p & (_PageSize - 1)
 	if uintptr(p)+n > uintptr(h.arena_used) {
-		h.arena_used = p + n
+		mHeap_MapBits(h, p+n)
+		mHeap_MapSpans(h, p+n)
+		h.arena_used = p+n
 		if p_end > h.arena_end {
 			h.arena_end = p_end
 		}
-		mHeap_MapBits(h)
-		mHeap_MapSpans(h)
 		if raceenabled {
 			racemapshadow((unsafe.Pointer)(p), n)
 		}


@@ -118,15 +118,20 @@ func subtract1(p *byte) *byte {
 // mHeap_MapBits is called each time arena_used is extended.
 // It maps any additional bitmap memory needed for the new arena memory.
+// It must be called with the expected new value of arena_used,
+// *before* h.arena_used has been updated.
+// Waiting to update arena_used until after the memory has been mapped
+// avoids faults when other threads try access the bitmap immediately
+// after observing the change to arena_used.
+//
 //go:nowritebarrier
-func mHeap_MapBits(h *mheap) {
+func mHeap_MapBits(h *mheap, arena_used uintptr) {
 	// Caller has added extra mappings to the arena.
 	// Add extra mappings of bitmap words as needed.
 	// We allocate extra bitmap pieces in chunks of bitmapChunk.
 	const bitmapChunk = 8192
-	n := (mheap_.arena_used - mheap_.arena_start) / heapBitmapScale
+	n := (arena_used - mheap_.arena_start) / heapBitmapScale
 	n = round(n, bitmapChunk)
 	n = round(n, _PhysPageSize)
 	if h.bitmap_mapped >= n {


@@ -279,10 +279,18 @@ func mHeap_Init(h *mheap, spans_size uintptr) {
 	sp.cap = int(spans_size / ptrSize)
 }
-func mHeap_MapSpans(h *mheap) {
+// mHeap_MapSpans makes sure that the spans are mapped
+// up to the new value of arena_used.
+//
+// It must be called with the expected new value of arena_used,
+// *before* h.arena_used has been updated.
+// Waiting to update arena_used until after the memory has been mapped
+// avoids faults when other threads try access the bitmap immediately
+// after observing the change to arena_used.
+func mHeap_MapSpans(h *mheap, arena_used uintptr) {
 	// Map spans array, PageSize at a time.
-	n := uintptr(unsafe.Pointer(h.arena_used))
-	n -= uintptr(unsafe.Pointer(h.arena_start))
+	n := arena_used
+	n -= h.arena_start
 	n = n / _PageSize * ptrSize
 	n = round(n, _PhysPageSize)
 	if h.spans_mapped >= n {
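
On the reader side of the race named in the commit message, code like gcmarkwb_m checks a pointer against the published arena_used and then touches the heap bitmap for it; if arena_used were published before the bitmap pages were mapped, that bitmap access could fault. A hedged reader-side sketch — the names (markIfHeapPointer, bitmap, the /8 scale) are hypothetical stand-ins, not the actual write-barrier code:

// Hedged reader-side sketch; hypothetical names, not the real write barrier.
package sketch

import "sync/atomic"

var (
	arenaStart uintptr
	arenaUsed  uintptr // published by the grower only after the bitmap is mapped
	bitmap     []byte  // stand-in for the raw heap bitmap mapping
)

// markIfHeapPointer marks p's bitmap entry if p lies inside the heap.
func markIfHeapPointer(p uintptr) {
	used := atomic.LoadUintptr(&arenaUsed)
	if p < arenaStart || p >= used {
		return // not a heap pointer, nothing to mark
	}
	// If arenaUsed had been published before the bitmap covered this range,
	// the access below would be the failing one: a fault on raw memory in
	// the runtime, an out-of-range index in this slice-based sketch.
	idx := (p - arenaStart) / 8 // made-up scale, loosely analogous to heapBitmapScale
	bitmap[idx] |= 1
}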