From e9db7b9dd147d787883813a772b7cd5700b87114 Mon Sep 17 00:00:00 2001
From: Austin Clements
Date: Fri, 16 Feb 2018 17:53:16 -0500
Subject: [PATCH] runtime: abstract indexing of arena index

Accessing the arena index is about to get slightly more complicated.
Abstract this away into a set of functions for going back and forth
between addresses and arena slice indexes.

For #23862.

Change-Id: I0b20e74ef47a07b78ed0cf0a6128afe6f6e40f4b
Reviewed-on: https://go-review.googlesource.com/95496
Run-TryBot: Austin Clements
TryBot-Result: Gobot Gobot
Reviewed-by: Rick Hudson
---
 src/runtime/heapdump.go |  4 ++--
 src/runtime/malloc.go   | 29 ++++++++++++++++++++++++-------
 src/runtime/mbitmap.go  |  4 ++--
 src/runtime/mheap.go    | 36 ++++++++++++++++++++++++++++++------
 4 files changed, 56 insertions(+), 17 deletions(-)

diff --git a/src/runtime/heapdump.go b/src/runtime/heapdump.go
index 8854a5b634..362cb7c308 100644
--- a/src/runtime/heapdump.go
+++ b/src/runtime/heapdump.go
@@ -492,9 +492,9 @@ func dumpparams() {
 	for i, ha := range mheap_.arenas {
 		if ha != nil {
 			if arenaStart == 0 {
-				arenaStart = uintptr(i) * heapArenaBytes
+				arenaStart = arenaBase(uint(i))
 			}
-			arenaEnd = uintptr(i+1) * heapArenaBytes
+			arenaEnd = arenaBase(uint(i)) + heapArenaBytes
 		}
 	}
 	dumpint(uint64(arenaStart))
diff --git a/src/runtime/malloc.go b/src/runtime/malloc.go
index e9150fdbb6..a44dcd8c9d 100644
--- a/src/runtime/malloc.go
+++ b/src/runtime/malloc.go
@@ -451,9 +451,12 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 		if hint.down {
 			p -= n
 		}
-		if p+n < p || p+n >= memLimit-1 {
+		if p+n < p {
 			// We can't use this, so don't ask.
 			v = nil
+		} else if arenaIndex(p+n-1) >= uint(len(mheap_.arenas)) {
+			// Outside addressable heap. Can't use.
+			v = nil
 		} else {
 			v = sysReserve(unsafe.Pointer(p), n)
 		}
@@ -497,11 +500,23 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
 	}
 
-	if v := uintptr(v); v+size < v || v+size >= memLimit-1 {
-		// This should be impossible on most architectures,
-		// but it would be really confusing to debug.
-		print("runtime: memory allocated by OS [", hex(v), ", ", hex(v+size), ") exceeds address space limit (", hex(int64(memLimit)), ")\n")
-		throw("memory reservation exceeds address space limit")
+	// Check for bad pointers or pointers we can't use.
+	{
+		var bad string
+		p := uintptr(v)
+		if p+size < p {
+			bad = "region exceeds uintptr range"
+		} else if arenaIndex(p) >= uint(len(mheap_.arenas)) {
+			bad = "base outside usable address space"
+		} else if arenaIndex(p+size-1) >= uint(len(mheap_.arenas)) {
+			bad = "end outside usable address space"
+		}
+		if bad != "" {
+			// This should be impossible on most architectures,
+			// but it would be really confusing to debug.
+			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
+			throw("memory reservation exceeds address space limit")
+		}
 	}
 
 	if uintptr(v)&(heapArenaBytes-1) != 0 {
@@ -513,7 +528,7 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 
 mapped:
 	// Create arena metadata.
-	for ri := uintptr(v) / heapArenaBytes; ri < (uintptr(v)+size)/heapArenaBytes; ri++ {
+	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
 		if h.arenas[ri] != nil {
 			throw("arena already initialized")
 		}
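The helpers this change introduces are pure integer arithmetic: an address
maps to an arena slot by dividing by heapArenaBytes, and arenaBase inverts
that to recover the arena's low address. A standalone sketch of the round
trip, using an illustrative arena size rather than the runtime's
platform-dependent constant:

package main

import "fmt"

// Illustrative only: the real heapArenaBytes is a platform-dependent
// runtime constant, not necessarily 64 MiB.
const heapArenaBytes = 1 << 26

// Same shape as the helpers added to mheap.go below.
func arenaIndex(p uintptr) uint { return uint(p / heapArenaBytes) }
func arenaBase(i uint) uintptr  { return uintptr(i) * heapArenaBytes }

func main() {
	p := uintptr(3*heapArenaBytes + 12345)
	i := arenaIndex(p)
	fmt.Printf("addr %#x -> arena %d, base %#x\n", p, i, arenaBase(i))
	// arenaBase inverts arenaIndex on arena-aligned addresses;
	// the in-arena offset (12345 here) is discarded.
	fmt.Println(arenaIndex(arenaBase(i)) == i) // true
}
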
diff --git a/src/runtime/mbitmap.go b/src/runtime/mbitmap.go
index 3dc22e8458..baae3d911b 100644
--- a/src/runtime/mbitmap.go
+++ b/src/runtime/mbitmap.go
@@ -335,7 +335,7 @@ func (m *markBits) advance() {
 func heapBitsForAddr(addr uintptr) heapBits {
 	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
 	off := addr / sys.PtrSize
-	arena := addr / heapArenaBytes
+	arena := arenaIndex(addr)
 	ha := mheap_.arenas[arena]
 	// The compiler uses a load for nil checking ha, but in this
 	// case we'll almost never hit that cache line again, so it
@@ -971,7 +971,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	// machine instructions.
 
 	outOfPlace := false
-	if (x+size-1)/heapArenaBytes != uintptr(h.arena) {
+	if arenaIndex(x+size-1) != uint(h.arena) {
 		// This object spans heap arenas, so the bitmap may be
 		// discontiguous. Unroll it into the object instead
 		// and then copy it out.
diff --git a/src/runtime/mheap.go b/src/runtime/mheap.go
index 30df753c86..88fcbdda6e 100644
--- a/src/runtime/mheap.go
+++ b/src/runtime/mheap.go
@@ -99,6 +99,8 @@ type mheap struct {
 	// arenas is the heap arena index. arenas[va/heapArenaBytes]
 	// points to the metadata for the heap arena containing va.
 	//
+	// Use arenaIndex to compute indexes into this array.
+	//
 	// For regions of the address space that are not backed by the
 	// Go heap, the arena index contains nil.
 	//
@@ -407,6 +409,24 @@ func (sc spanClass) noscan() bool {
 	return sc&1 != 0
 }
 
+// arenaIndex returns the mheap_.arenas index of the arena containing
+// metadata for p. If p is outside the range of valid heap addresses,
+// it returns an index larger than len(mheap_.arenas).
+//
+// It is nosplit because it's called by spanOf and several other
+// nosplit functions.
+//
+//go:nosplit
+func arenaIndex(p uintptr) uint {
+	return uint(p / heapArenaBytes)
+}
+
+// arenaBase returns the low address of the region covered by heap
+// arena i.
+func arenaBase(i uint) uintptr {
+	return uintptr(i) * heapArenaBytes
+}
+
 // inheap reports whether b is a pointer into a (potentially dead) heap object.
 // It returns false for pointers into _MSpanManual spans.
 // Non-preemptible because it is used by write barriers.
@@ -446,10 +466,14 @@ func inHeapOrStack(b uintptr) bool {
 //
 //go:nosplit
 func spanOf(p uintptr) *mspan {
-	if p < minLegalPointer || p/heapArenaBytes >= uintptr(len(mheap_.arenas)) {
+	if p < minLegalPointer {
 		return nil
 	}
-	ha := mheap_.arenas[p/heapArenaBytes]
+	ri := arenaIndex(p)
+	if ri >= uint(len(mheap_.arenas)) {
+		return nil
+	}
+	ha := mheap_.arenas[ri]
 	if ha == nil {
 		return nil
 	}
@@ -463,7 +487,7 @@ func spanOf(p uintptr) *mspan {
 //
 //go:nosplit
 func spanOfUnchecked(p uintptr) *mspan {
-	return mheap_.arenas[p/heapArenaBytes].spans[(p/pageSize)%pagesPerArena]
+	return mheap_.arenas[arenaIndex(p)].spans[(p/pageSize)%pagesPerArena]
 }
 
 // spanOfHeap is like spanOf, but returns nil if p does not point to a
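The reworked spanOf shows the lookup discipline the sparse arena index
requires: compute the index, bounds-check it against the table, then
nil-check the slot before touching per-arena metadata. A toy version of
that pattern, with made-up sizes and a stand-in arena type (nothing here
is the runtime's real layout):

package main

import "fmt"

const (
	toyArenaBytes = 1 << 20 // made-up arena size for the example
	toyNumArenas  = 64      // made-up address-space cap
)

type toyArena struct{ id int }

// Sparse index: nil for address ranges not backed by the heap,
// as in mheap_.arenas.
var toyArenas [toyNumArenas]*toyArena

// lookup mirrors the patched spanOf: index, bounds check, nil check.
func lookup(p uintptr) *toyArena {
	ri := uint(p / toyArenaBytes)
	if ri >= uint(len(toyArenas)) {
		return nil // outside the addressable heap
	}
	return toyArenas[ri] // nil if this range was never mapped
}

func main() {
	toyArenas[3] = &toyArena{id: 3}
	fmt.Println(lookup(3*toyArenaBytes + 42)) // &{3}
	fmt.Println(lookup(10 * toyArenaBytes))   // <nil>: unmapped hole
	fmt.Println(lookup(1 << 40))              // <nil>: out of range
}
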
@@ -738,18 +762,18 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 
 // setSpan modifies the span map so spanOf(base) is s.
 func (h *mheap) setSpan(base uintptr, s *mspan) {
-	h.arenas[base/heapArenaBytes].spans[(base/pageSize)%pagesPerArena] = s
+	h.arenas[arenaIndex(base)].spans[(base/pageSize)%pagesPerArena] = s
 }
 
 // setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
 // is s.
 func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
 	p := base / pageSize
-	ha := h.arenas[p/pagesPerArena]
+	ha := h.arenas[arenaIndex(base)]
 	for n := uintptr(0); n < npage; n++ {
 		i := (p + n) % pagesPerArena
 		if i == 0 {
-			ha = h.arenas[(p+n)/pagesPerArena]
+			ha = h.arenas[arenaIndex(base+n*pageSize)]
 		}
 		ha.spans[i] = s
 	}
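The subtlety in this last hunk is that a span's pages can straddle an
arena boundary, so the arena pointer must be re-derived whenever the
per-arena page index wraps to zero. A runnable miniature of that walk,
with tiny made-up sizes so the crossing happens quickly (these are not
the runtime's constants):

package main

import "fmt"

const (
	pageSize      = 4096 // made-up page size
	pagesPerArena = 4    // made-up; tiny so the walk crosses arenas
	arenaBytes    = pagesPerArena * pageSize
)

type span struct{ name string }

type arena struct {
	spans [pagesPerArena]*span
}

var arenas [8]*arena

// setSpans follows the shape of the patched mheap.setSpans: walk the
// pages and reload the arena whenever the per-arena page index wraps.
func setSpans(base, npage uintptr, s *span) {
	p := base / pageSize
	ha := arenas[base/arenaBytes] // arenaIndex(base) in the real code
	for n := uintptr(0); n < npage; n++ {
		i := (p + n) % pagesPerArena
		if i == 0 {
			// Crossed into the next arena (or started on a
			// boundary): re-derive the arena from the address.
			ha = arenas[(base+n*pageSize)/arenaBytes]
		}
		ha.spans[i] = s
	}
}

func main() {
	for i := range arenas {
		arenas[i] = &arena{}
	}
	s := &span{name: "s"}
	// Pages 2..7 straddle arena 0 (pages 0-3) and arena 1 (pages 4-7).
	setSpans(2*pageSize, 6, s)
	for ai, a := range arenas[:2] {
		for pi, sp := range a.spans {
			if sp != nil {
				fmt.Printf("arena %d page %d -> %s\n", ai, pi, sp.name)
			}
		}
	}
}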