
runtime: consolidate changes to arena_used

Changing mheap_.arena_used requires several steps that are currently
repeated multiple times in mheap_.sysAlloc. Consolidate these into a
single function.

In the future, this will also make it easier to add other auxiliary VM
structures.

Change-Id: Ie68837d2612e1f4ba4904acb1b6b832b15431d56
Reviewed-on: https://go-review.googlesource.com/40151
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
Austin Clements 2017-04-07 13:49:51 -04:00
parent 075ee299b1
commit 6c6f455f88
3 changed files with 42 additions and 29 deletions
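
For readers skimming the diffs below, here is a small, self-contained Go sketch of the pattern the commit message describes: several call sites each repeated the same grow-the-heap bookkeeping, and the commit folds those steps into one helper (setArenaUsed in the real change). The heap, setUsed, mapBitmap, and mapSpans names below are illustrative stand-ins, not the runtime's API.

package main

import "fmt"

// heap is a toy stand-in for the runtime's mheap. Only the bookkeeping
// relevant to this commit is modeled.
type heap struct {
	used       uintptr // one byte past the usable region
	bitmapUpTo uintptr // how far the auxiliary bitmap has been prepared
	spansUpTo  uintptr // how far the auxiliary spans array has been prepared
}

func (h *heap) mapBitmap(limit uintptr) { h.bitmapUpTo = limit } // stand-in for mheap.mapBits
func (h *heap) mapSpans(limit uintptr)  { h.spansUpTo = limit }  // stand-in for mheap.mapSpans

// setUsed plays the role of setArenaUsed: every auxiliary structure is
// prepared first, and the new bound is published last. Before this commit,
// each call site in sysAlloc spelled out these steps by hand.
func (h *heap) setUsed(limit uintptr) {
	h.mapBitmap(limit)
	h.mapSpans(limit)
	h.used = limit // updated only after everything it guards is ready
}

func main() {
	h := &heap{}
	h.setUsed(1 << 20) // one call per growth instead of a repeated sequence
	fmt.Printf("used=%#x bitmap=%#x spans=%#x\n", h.used, h.bitmapUpTo, h.spansUpTo)
}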


@@ -397,16 +397,22 @@ func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
 				return nil
 			}
 			if p == h.arena_end {
+				// The new reservation is contiguous
+				// with the old reservation.
 				h.arena_end = new_end
 				h.arena_reserved = reserved
 			} else if h.arena_start <= p && p+p_size-h.arena_start-1 <= _MaxMem {
+				// We were able to reserve more memory
+				// within the arena space, but it's
+				// not contiguous with our previous
+				// reservation. Skip over the unused
+				// address space.
+				//
 				// Keep everything page-aligned.
 				// Our pages are bigger than hardware pages.
 				h.arena_end = p + p_size
 				used := p + (-p & (_PageSize - 1))
-				h.mapBits(used)
-				h.mapSpans(used)
-				h.arena_used = used
+				h.setArenaUsed(used, false)
 				h.arena_reserved = reserved
 			} else {
 				// We haven't added this allocation to
@@ -422,12 +428,7 @@ func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
 		// Keep taking from our reservation.
 		p := h.arena_used
 		sysMap(unsafe.Pointer(p), n, h.arena_reserved, &memstats.heap_sys)
-		h.mapBits(p + n)
-		h.mapSpans(p + n)
-		h.arena_used = p + n
-		if raceenabled {
-			racemapshadow(unsafe.Pointer(p), n)
-		}
+		h.setArenaUsed(p+n, true)

 		if p&(_PageSize-1) != 0 {
 			throw("misrounded allocation in MHeap_SysAlloc")
@@ -460,15 +461,10 @@ func (h *mheap) sysAlloc(n uintptr) unsafe.Pointer {
 	p_end := p + p_size
 	p += -p & (_PageSize - 1)
 	if p+n > h.arena_used {
-		h.mapBits(p + n)
-		h.mapSpans(p + n)
-		h.arena_used = p + n
+		h.setArenaUsed(p+n, true)
 		if p_end > h.arena_end {
 			h.arena_end = p_end
 		}
-		if raceenabled {
-			racemapshadow(unsafe.Pointer(p), n)
-		}
 	}

 	if p&(_PageSize-1) != 0 {
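
A side note on the arithmetic these call sites use: used := p + (-p & (_PageSize - 1)) rounds p up to the next multiple of the (power-of-two) page size. A standalone check of that identity, with an assumed stand-in value for the runtime's _PageSize:

package main

import "fmt"

func main() {
	const pageSize = 8192 // assumed stand-in for the runtime's _PageSize; any power of two works
	for _, p := range []uintptr{0, 1, 8191, 8192, 8193, 20000} {
		// -p on an unsigned value wraps, so -p & (pageSize-1) is the distance
		// from p up to the next multiple of pageSize (0 if already aligned).
		rounded := p + (-p & (pageSize - 1))
		fmt.Printf("p=%5d -> rounded=%5d\n", p, rounded)
	}
}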


@@ -134,13 +134,9 @@ func subtract1(p *byte) *byte {
 	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) - 1))
 }

-// mHeap_MapBits is called each time arena_used is extended.
-// It maps any additional bitmap memory needed for the new arena memory.
-// It must be called with the expected new value of arena_used,
-// *before* h.arena_used has been updated.
-// Waiting to update arena_used until after the memory has been mapped
-// avoids faults when other threads try access the bitmap immediately
-// after observing the change to arena_used.
+// mapBits maps any additional bitmap memory needed for the new arena memory.
+//
+// Don't call this directly. Call mheap.setArenaUsed.
 //
 //go:nowritebarrier
 func (h *mheap) mapBits(arena_used uintptr) {
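
Both mapBits here and mapSpans below now point callers at setArenaUsed, which preserves the ordering the old comments insisted on: map the auxiliary memory first, publish the new arena_used last, so a thread that observes the new bound never faults on unmapped structures. The sketch below is a generic, self-contained illustration of that publish-last discipline using sync/atomic; the names bound, table, and grow are made up for the example, and this is not how the runtime itself synchronizes, just the same idea in ordinary Go.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

var (
	bound  atomic.Uint64 // published limit: readers may touch table[i] for i < bound
	table  [1 << 16]byte // the "auxiliary structure" guarded by bound
	growMu sync.Mutex    // growers are serialized, like holding the heap lock
)

// grow prepares the backing state first and publishes the new bound last,
// mirroring setArenaUsed: a reader that sees the new bound never sees
// unprepared state.
func grow(newBound uint64) {
	growMu.Lock()
	defer growMu.Unlock()
	for i := bound.Load(); i < newBound; i++ {
		table[i] = 1 // "map" the newly covered region
	}
	bound.Store(newBound) // publish only after initialization is complete
}

func main() {
	var wg sync.WaitGroup
	wg.Add(1)
	go func() { // concurrent reader
		defer wg.Done()
		for n := 0; n < 1000; n++ {
			b := bound.Load()
			for i := uint64(0); i < b; i++ {
				if table[i] == 0 {
					panic("saw the new bound before the backing state")
				}
			}
		}
	}()
	for b := uint64(1); b <= 1<<10; b *= 2 {
		grow(b)
	}
	wg.Wait()
	fmt.Println("reader never observed uninitialized state below the published bound")
}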


@@ -93,7 +93,7 @@ type mheap struct {
 	bitmap         uintptr // Points to one byte past the end of the bitmap
 	bitmap_mapped  uintptr
 	arena_start    uintptr
-	arena_used     uintptr // always mHeap_Map{Bits,Spans} before updating
+	arena_used     uintptr // One byte past usable heap arena. Set with setArenaUsed.
 	arena_end      uintptr
 	arena_reserved bool
@@ -435,14 +435,35 @@ func (h *mheap) init(spansStart, spansBytes uintptr) {
 	sp.cap = int(spansBytes / sys.PtrSize)
 }

-// mHeap_MapSpans makes sure that the spans are mapped
+// setArenaUsed extends the usable arena to address arena_used and
+// maps auxiliary VM regions for any newly usable arena space.
+//
+// racemap indicates that this memory should be managed by the race
+// detector. racemap should be true unless this is covering a VM hole.
+func (h *mheap) setArenaUsed(arena_used uintptr, racemap bool) {
+	// Map auxiliary structures *before* h.arena_used is updated.
+	// Waiting to update arena_used until after the memory has been mapped
+	// avoids faults when other threads try access these regions immediately
+	// after observing the change to arena_used.
+
+	// Map the bitmap.
+	h.mapBits(arena_used)
+
+	// Map spans array.
+	h.mapSpans(arena_used)
+
+	// Tell the race detector about the new heap memory.
+	if racemap && raceenabled {
+		racemapshadow(unsafe.Pointer(h.arena_used), arena_used-h.arena_used)
+	}
+
+	h.arena_used = arena_used
+}
+
+// mapSpans makes sure that the spans are mapped
 // up to the new value of arena_used.
 //
-// It must be called with the expected new value of arena_used,
-// *before* h.arena_used has been updated.
-// Waiting to update arena_used until after the memory has been mapped
-// avoids faults when other threads try access the bitmap immediately
-// after observing the change to arena_used.
+// Don't call this directly. Call mheap.setArenaUsed.
 func (h *mheap) mapSpans(arena_used uintptr) {
 	// Map spans array, PageSize at a time.
 	n := arena_used
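
Taken together with the sysAlloc changes above, every growth path now funnels through the one helper, and the only per-call-site decision left is the racemap flag: true for ordinary growth, false when the extension skips over a hole in the reserved address space. Below is a toy, self-contained model of that contract; the arena, setUsed, and raceShadow names are illustrative, not the runtime's.

package main

import "fmt"

// arena is a toy stand-in for mheap; raceShadow stands in for the
// racemapshadow notifications the real setArenaUsed makes.
type arena struct {
	used       uintptr
	raceShadow []string
}

func (a *arena) mapBits(limit uintptr)  {} // placeholder for mheap.mapBits
func (a *arena) mapSpans(limit uintptr) {} // placeholder for mheap.mapSpans

// setUsed models setArenaUsed: auxiliary state first, race-detector
// notification for the newly usable range unless it covers a hole,
// and the bound published last.
func (a *arena) setUsed(limit uintptr, racemap bool) {
	a.mapBits(limit)
	a.mapSpans(limit)
	if racemap {
		a.raceShadow = append(a.raceShadow, fmt.Sprintf("[%#x, %#x)", a.used, limit))
	}
	a.used = limit
}

func main() {
	a := &arena{}
	a.setUsed(0x1000, true)  // ordinary growth: tell the race detector
	a.setUsed(0x3000, false) // reservation skipped a hole: leave it unshadowed
	a.setUsed(0x4000, true)  // back to ordinary growth
	fmt.Println("race-shadowed ranges:", a.raceShadow)
}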