runtime: wait to update arena_used until after mapping bitmap
This avoids a race with gcmarkwb_m that was leading to faults.

Fixes #10212.

Change-Id: I6fcf8d09f2692227063ce29152cb57366ea22487
Reviewed-on: https://go-review.googlesource.com/10816
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Austin Clements <austin@google.com>
commit d57c889ae8
parent a788c913fa
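The fix is an instance of a general map-before-publish ordering: prepare the new memory first, and only then advance the boundary that concurrent readers (here, the gcmarkwb_m write barrier) trust. Below is a minimal standalone Go sketch of that ordering; the names arena, backing, mapped, and published are invented for this example and do not come from the runtime. It models the invariant, not the runtime's actual code.

package main

import (
	"fmt"
	"sync/atomic"
)

// Toy model of the arena. backing stands in for the reserved address
// space, mapped for how much of it is actually usable, and published
// for h.arena_used, the boundary concurrent readers observe.
type arena struct {
	backing   [1 << 16]byte
	mapped    atomic.Uint64
	published atomic.Uint64
}

// grow uses the commit's ordering: make the memory usable first,
// publish the new boundary second. A reader that sees the new
// published value is then guaranteed mapped has already advanced.
func (a *arena) grow(n uint64) {
	a.mapped.Add(n)    // "map" first
	a.published.Add(n) // publish second
}

// read models a reader such as the write barrier: it trusts published
// and then touches the memory. With grow's ordering, off < published
// implies off < mapped, so the access can never "fault".
func (a *arena) read(off uint64) (byte, bool) {
	if off >= a.published.Load() {
		return 0, false
	}
	return a.backing[off], true
}

func main() {
	var a arena
	a.grow(4096)
	b, ok := a.read(100)
	fmt.Println(b, ok) // 0 true
}

Reversing the two lines in grow reintroduces the window in which a reader can observe the new boundary before the memory behind it is usable, which is exactly the fault the commit removes. The two hunks below apply this reordering to both allocation paths in mHeap_SysAlloc.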
@@ -418,9 +418,9 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 		// Keep taking from our reservation.
 		p := h.arena_used
 		sysMap((unsafe.Pointer)(p), n, h.arena_reserved, &memstats.heap_sys)
-		h.arena_used += n
-		mHeap_MapBits(h)
-		mHeap_MapSpans(h)
+		mHeap_MapBits(h, p+n)
+		mHeap_MapSpans(h, p+n)
+		h.arena_used = p + n
 		if raceenabled {
 			racemapshadow((unsafe.Pointer)(p), n)
 		}
@@ -454,12 +454,12 @@ func mHeap_SysAlloc(h *mheap, n uintptr) unsafe.Pointer {
 	p_end := p + p_size
 	p += -p & (_PageSize - 1)
 	if uintptr(p)+n > uintptr(h.arena_used) {
-		h.arena_used = p + n
+		mHeap_MapBits(h, p+n)
+		mHeap_MapSpans(h, p+n)
+		h.arena_used = p + n
 		if p_end > h.arena_end {
 			h.arena_end = p_end
 		}
-		mHeap_MapBits(h)
-		mHeap_MapSpans(h)
 		if raceenabled {
 			racemapshadow((unsafe.Pointer)(p), n)
 		}
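One detail worth calling out in the context lines of this hunk: p += -p & (_PageSize - 1) rounds p up to the next page boundary, since for a power-of-two size, -p & (size-1) is the distance from p to the next multiple of size (zero if p is already aligned). A small self-contained sketch of that identity, with an illustrative pageSize rather than the runtime's _PageSize:

package main

import "fmt"

const pageSize = 4096 // a power of two, as _PageSize is in the runtime

// alignUp rounds p up to the next multiple of pageSize using the same
// bit trick as the diff: -p & (pageSize-1) is the number of bytes
// needed to reach the next boundary (0 if p is already aligned).
func alignUp(p uintptr) uintptr {
	return p + (-p & (pageSize - 1))
}

func main() {
	for _, p := range []uintptr{0, 1, 4095, 4096, 4097} {
		fmt.Printf("%5d -> %5d\n", p, alignUp(p))
	}
	// 0 -> 0, 1 -> 4096, 4095 -> 4096, 4096 -> 4096, 4097 -> 8192
}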
@@ -118,15 +118,20 @@ func subtract1(p *byte) *byte {
 
 // mHeap_MapBits is called each time arena_used is extended.
 // It maps any additional bitmap memory needed for the new arena memory.
+// It must be called with the expected new value of arena_used,
+// *before* h.arena_used has been updated.
+// Waiting to update arena_used until after the memory has been mapped
+// avoids faults when other threads try to access the bitmap immediately
+// after observing the change to arena_used.
 //
 //go:nowritebarrier
-func mHeap_MapBits(h *mheap) {
+func mHeap_MapBits(h *mheap, arena_used uintptr) {
 	// Caller has added extra mappings to the arena.
 	// Add extra mappings of bitmap words as needed.
 	// We allocate extra bitmap pieces in chunks of bitmapChunk.
 	const bitmapChunk = 8192
 
-	n := (mheap_.arena_used - mheap_.arena_start) / heapBitmapScale
+	n := (arena_used - mheap_.arena_start) / heapBitmapScale
 	n = round(n, bitmapChunk)
 	n = round(n, _PhysPageSize)
 	if h.bitmap_mapped >= n {
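The sizing arithmetic above can be read in isolation: the bitmap needs one byte per heapBitmapScale bytes of heap, and the mapping grows in bitmapChunk pieces rounded up to physical pages, all computed from the caller-supplied prospective arena_used rather than the global. A standalone sketch follows; bitmapChunk matches the hunk, but the heapBitmapScale and physPageSize values here are assumptions for a typical 64-bit configuration, not definitions taken from this file:

package main

import "fmt"

// Illustrative constants. heapBitmapScale is the number of heap bytes
// described per bitmap byte (assumed 32: 8-byte words, 2 bitmap bits
// each); bitmapChunk matches the diff; physPageSize is assumed 4 KiB.
const (
	heapBitmapScale = 32
	bitmapChunk     = 8192
	physPageSize    = 4096
)

// round rounds n up to a multiple of a (a must be a power of two here),
// matching how the runtime's round helper behaves for these arguments.
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// bitmapBytesNeeded mirrors the computation in mHeap_MapBits: size the
// bitmap from the *prospective* arena_used passed in by the caller,
// then round up to chunk and physical-page granularity. Mapping this
// much before h.arena_used is updated is the point of the commit.
func bitmapBytesNeeded(arenaUsed, arenaStart uintptr) uintptr {
	n := (arenaUsed - arenaStart) / heapBitmapScale
	n = round(n, bitmapChunk)
	n = round(n, physPageSize)
	return n
}

func main() {
	start := uintptr(0xc000000000)
	fmt.Println(bitmapBytesNeeded(start+1<<20, start)) // 1 MiB heap -> 32768
}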
@@ -279,10 +279,18 @@ func mHeap_Init(h *mheap, spans_size uintptr) {
 	sp.cap = int(spans_size / ptrSize)
 }
 
-func mHeap_MapSpans(h *mheap) {
+// mHeap_MapSpans makes sure that the spans are mapped
+// up to the new value of arena_used.
+//
+// It must be called with the expected new value of arena_used,
+// *before* h.arena_used has been updated.
+// Waiting to update arena_used until after the memory has been mapped
+// avoids faults when other threads try to access the bitmap immediately
+// after observing the change to arena_used.
+func mHeap_MapSpans(h *mheap, arena_used uintptr) {
 	// Map spans array, PageSize at a time.
-	n := uintptr(unsafe.Pointer(h.arena_used))
-	n -= uintptr(unsafe.Pointer(h.arena_start))
+	n := arena_used
+	n -= h.arena_start
 	n = n / _PageSize * ptrSize
 	n = round(n, _PhysPageSize)
 	if h.spans_mapped >= n {
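mHeap_MapSpans does the analogous computation for the spans array: one pointer per arena page, so the mapped size is pages * ptrSize rounded up to a physical page, again derived from the prospective arena_used rather than the not-yet-updated h.arena_used. A sketch under illustrative assumptions (the pageSize, ptrSize, and physPageSize values are not taken from this file):

package main

import "fmt"

// Illustrative constants for a 64-bit platform: one ptrSize-byte
// *mspan pointer is needed per pageSize bytes of arena.
const (
	pageSize     = 8192 // stands in for the runtime's _PageSize
	ptrSize      = 8
	physPageSize = 4096
)

// round rounds n up to a multiple of a (a power of two here).
func round(n, a uintptr) uintptr {
	return (n + a - 1) &^ (a - 1)
}

// spansBytesNeeded mirrors mHeap_MapSpans: the spans array holds one
// pointer per arena page, so its mapped size is pages * ptrSize,
// rounded up to a physical page. As with the bitmap, it is computed
// from the expected new arena_used before h.arena_used is published.
func spansBytesNeeded(arenaUsed, arenaStart uintptr) uintptr {
	n := arenaUsed - arenaStart
	n = n / pageSize * ptrSize
	n = round(n, physPageSize)
	return n
}

func main() {
	start := uintptr(0xc000000000)
	fmt.Println(spansBytesNeeded(start+64<<20, start)) // 64 MiB arena -> 65536
}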