runtime: abstract indexing of arena index

Accessing the arena index is about to get slightly more complicated.
Abstract this away into a set of functions for going back and forth
between addresses and arena slice indexes.

For #23862.

Change-Id: I0b20e74ef47a07b78ed0cf0a6128afe6f6e40f4b
Reviewed-on: https://go-review.googlesource.com/95496
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
This commit is contained in:
parent 3e214e5693
commit e9db7b9dd1
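As a rough standalone sketch (not part of the commit) of the address/index mapping the new helpers implement: the 64 MB value for heapArenaBytes is an assumption matching a typical 64-bit configuration of the time, and the package main / fmt scaffolding is only there to make the example runnable outside the runtime.

package main

import "fmt"

// heapArenaBytes is assumed to be 64 MB here; the real constant is
// platform-dependent and defined in the runtime.
const heapArenaBytes = 64 << 20

// arenaIndex mirrors the helper added by this change: it maps an
// address to the index of the arena that would contain it.
func arenaIndex(p uintptr) uint {
	return uint(p / heapArenaBytes)
}

// arenaBase mirrors the inverse helper: it maps an arena index back
// to the lowest address that arena covers.
func arenaBase(i uint) uintptr {
	return uintptr(i) * heapArenaBytes
}

func main() {
	p := uintptr(3*heapArenaBytes + 0x1234) // an address somewhere inside arena 3
	i := arenaIndex(p)
	fmt.Printf("addr %#x -> arena %d (base %#x)\n", p, i, arenaBase(i))
	// Round trip: every address maps into the half-open range
	// [arenaBase(i), arenaBase(i)+heapArenaBytes).
	fmt.Println(arenaBase(i) <= p && p < arenaBase(i)+heapArenaBytes) // true
}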
@@ -492,9 +492,9 @@ func dumpparams() {
 	for i, ha := range mheap_.arenas {
 		if ha != nil {
 			if arenaStart == 0 {
-				arenaStart = uintptr(i) * heapArenaBytes
+				arenaStart = arenaBase(uint(i))
 			}
-			arenaEnd = uintptr(i+1) * heapArenaBytes
+			arenaEnd = arenaBase(uint(i)) + heapArenaBytes
 		}
 	}
 	dumpint(uint64(arenaStart))
@@ -451,9 +451,12 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 		if hint.down {
 			p -= n
 		}
-		if p+n < p || p+n >= memLimit-1 {
+		if p+n < p {
 			// We can't use this, so don't ask.
 			v = nil
+		} else if arenaIndex(p+n-1) >= uint(len(mheap_.arenas)) {
+			// Outside addressable heap. Can't use.
+			v = nil
 		} else {
 			v = sysReserve(unsafe.Pointer(p), n)
 		}
@@ -497,12 +500,24 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 		hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
 	}
 
-	if v := uintptr(v); v+size < v || v+size >= memLimit-1 {
+	// Check for bad pointers or pointers we can't use.
+	{
+		var bad string
+		p := uintptr(v)
+		if p+size < p {
+			bad = "region exceeds uintptr range"
+		} else if arenaIndex(p) >= uint(len(mheap_.arenas)) {
+			bad = "base outside usable address space"
+		} else if arenaIndex(p+size-1) >= uint(len(mheap_.arenas)) {
+			bad = "end outside usable address space"
+		}
+		if bad != "" {
 			// This should be impossible on most architectures,
 			// but it would be really confusing to debug.
-			print("runtime: memory allocated by OS [", hex(v), ", ", hex(v+size), ") exceeds address space limit (", hex(int64(memLimit)), ")\n")
+			print("runtime: memory allocated by OS [", hex(p), ", ", hex(p+size), ") not in usable address space: ", bad, "\n")
 			throw("memory reservation exceeds address space limit")
 		}
+	}
 
 	if uintptr(v)&(heapArenaBytes-1) != 0 {
 		throw("misrounded allocation in sysAlloc")
@@ -513,7 +528,7 @@ func (h *mheap) sysAlloc(n uintptr) (v unsafe.Pointer, size uintptr) {
 
 mapped:
 	// Create arena metadata.
-	for ri := uintptr(v) / heapArenaBytes; ri < (uintptr(v)+size)/heapArenaBytes; ri++ {
+	for ri := arenaIndex(uintptr(v)); ri <= arenaIndex(uintptr(v)+size-1); ri++ {
 		if h.arenas[ri] != nil {
 			throw("arena already initialized")
 		}
@@ -335,7 +335,7 @@ func (m *markBits) advance() {
 func heapBitsForAddr(addr uintptr) heapBits {
 	// 2 bits per word, 4 pairs per byte, and a mask is hard coded.
 	off := addr / sys.PtrSize
-	arena := addr / heapArenaBytes
+	arena := arenaIndex(addr)
 	ha := mheap_.arenas[arena]
 	// The compiler uses a load for nil checking ha, but in this
 	// case we'll almost never hit that cache line again, so it
@@ -971,7 +971,7 @@ func heapBitsSetType(x, size, dataSize uintptr, typ *_type) {
 	// machine instructions.
 
 	outOfPlace := false
-	if (x+size-1)/heapArenaBytes != uintptr(h.arena) {
+	if arenaIndex(x+size-1) != uint(h.arena) {
 		// This object spans heap arenas, so the bitmap may be
 		// discontiguous. Unroll it into the object instead
 		// and then copy it out.
@@ -99,6 +99,8 @@ type mheap struct {
 	// arenas is the heap arena index. arenas[va/heapArenaBytes]
 	// points to the metadata for the heap arena containing va.
 	//
+	// Use arenaIndex to compute indexes into this array.
+	//
 	// For regions of the address space that are not backed by the
 	// Go heap, the arena index contains nil.
 	//
@@ -407,6 +409,24 @@ func (sc spanClass) noscan() bool {
 	return sc&1 != 0
 }
 
+// arenaIndex returns the mheap_.arenas index of the arena containing
+// metadata for p. If p is outside the range of valid heap addresses,
+// it returns an index larger than len(mheap_.arenas).
+//
+// It is nosplit because it's called by spanOf and several other
+// nosplit functions.
+//
+//go:nosplit
+func arenaIndex(p uintptr) uint {
+	return uint(p / heapArenaBytes)
+}
+
+// arenaBase returns the low address of the region covered by heap
+// arena i.
+func arenaBase(i uint) uintptr {
+	return uintptr(i) * heapArenaBytes
+}
+
 // inheap reports whether b is a pointer into a (potentially dead) heap object.
 // It returns false for pointers into _MSpanManual spans.
 // Non-preemptible because it is used by write barriers.
@@ -446,10 +466,14 @@ func inHeapOrStack(b uintptr) bool {
 //
 //go:nosplit
 func spanOf(p uintptr) *mspan {
-	if p < minLegalPointer || p/heapArenaBytes >= uintptr(len(mheap_.arenas)) {
+	if p < minLegalPointer {
 		return nil
 	}
-	ha := mheap_.arenas[p/heapArenaBytes]
+	ri := arenaIndex(p)
+	if ri >= uint(len(mheap_.arenas)) {
+		return nil
+	}
+	ha := mheap_.arenas[ri]
 	if ha == nil {
 		return nil
 	}
@@ -463,7 +487,7 @@ func spanOf(p uintptr) *mspan {
 //
 //go:nosplit
 func spanOfUnchecked(p uintptr) *mspan {
-	return mheap_.arenas[p/heapArenaBytes].spans[(p/pageSize)%pagesPerArena]
+	return mheap_.arenas[arenaIndex(p)].spans[(p/pageSize)%pagesPerArena]
 }
 
 // spanOfHeap is like spanOf, but returns nil if p does not point to a
@@ -738,18 +762,18 @@ func (h *mheap) allocManual(npage uintptr, stat *uint64) *mspan {
 
 // setSpan modifies the span map so spanOf(base) is s.
 func (h *mheap) setSpan(base uintptr, s *mspan) {
-	h.arenas[base/heapArenaBytes].spans[(base/pageSize)%pagesPerArena] = s
+	h.arenas[arenaIndex(base)].spans[(base/pageSize)%pagesPerArena] = s
 }
 
 // setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
 // is s.
 func (h *mheap) setSpans(base, npage uintptr, s *mspan) {
 	p := base / pageSize
-	ha := h.arenas[p/pagesPerArena]
+	ha := h.arenas[arenaIndex(base)]
 	for n := uintptr(0); n < npage; n++ {
 		i := (p + n) % pagesPerArena
 		if i == 0 {
-			ha = h.arenas[(p+n)/pagesPerArena]
+			ha = h.arenas[arenaIndex(base+n*pageSize)]
 		}
 		ha.spans[i] = s
 	}