runtime: eliminate most uses of mheap_.arena_*

This replaces all uses of the mheap_.arena_* fields outside of
mallocinit and sysAlloc. These fields fundamentally assume a
contiguous heap between two bounds, so eliminating these is necessary
for a sparse heap.

Many of these are replaced with checks for non-nil spans at the test
address (which in turn checks for a non-nil entry in the heap arena
array). Some of them are just for debugging and somewhat meaningless
with a sparse heap, so those we just delete.

Updates #10460.

Change-Id: I8345b95ffc610aed694f08f74633b3c63506a41f
Reviewed-on: https://go-review.googlesource.com/85886
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
Parent: d6e8218581
Commit: 45ffeab549
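The heart of the change is swapping a two-bound range test for a table
lookup. The following is a minimal, self-contained sketch of that idea;
the names and sizes (sparseHeap, heapArenaBytes of 4 MiB, a 4 GiB toy
address space) are invented for illustration, not the runtime's actual
declarations:

    package main

    import "fmt"

    const heapArenaBytes = 1 << 22 // assumed 4 MiB arena size for the sketch

    // heapArena stands in for per-arena metadata (span map, bitmaps); elided.
    type heapArena struct{}

    type sparseHeap struct {
    	arenas [1 << 10]*heapArena // one entry per arena; nil = unmapped hole
    }

    // arenaIndex returns which arena entry covers addr.
    func arenaIndex(addr uintptr) uintptr { return addr / heapArenaBytes }

    // inHeap reports whether addr falls in a mapped arena, replacing the
    // old contiguous arena_start <= addr < arena_used check.
    func (h *sparseHeap) inHeap(addr uintptr) bool {
    	i := arenaIndex(addr)
    	return i < uintptr(len(h.arenas)) && h.arenas[i] != nil
    }

    func main() {
    	var h sparseHeap
    	h.arenas[3] = &heapArena{}                // pretend arena #3 was mapped
    	fmt.Println(h.inHeap(3 * heapArenaBytes)) // true
    	fmt.Println(h.inHeap(5 * heapArenaBytes)) // false: hole in the sparse heap
    }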
@@ -572,17 +572,7 @@ func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
 // pointer into Go memory. If it does, we panic.
 // The return values are unused but useful to see in panic tracebacks.
 func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
-	if cgoInRange(p, mheap_.arena_start, mheap_.arena_used) {
-		if !inheap(uintptr(p)) {
-			// On 32-bit systems it is possible for C's allocated memory
-			// to have addresses between arena_start and arena_used.
-			// Either this pointer is a stack or an unused span or it's
-			// a C allocation. Escape analysis should prevent the first,
-			// garbage collection should prevent the second,
-			// and the third is completely OK.
-			return
-		}
-
+	if inheap(uintptr(p)) {
 		b, span, _ := findObject(uintptr(p), 0, 0)
 		base = b
 		if base == 0 {
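With a sparse heap the 32-bit special case disappears: a pointer that is
not backed by a span simply is not in the heap, so inheap plus findObject
answer the question directly. A toy sketch of why findObject can report
"not a heap pointer" on its own; the chunk size, span layout, and
map-based lookup are assumptions for the sketch, not the runtime's
representation:

    package main

    import "fmt"

    type span struct {
    	base, elemSize uintptr
    }

    // spans maps a chunk index to its span metadata; a missing entry means
    // the address range is not part of the Go heap.
    var spans = map[uintptr]*span{
    	2: {base: 2 << 20, elemSize: 64},
    }

    func findObject(p uintptr) (base uintptr, s *span) {
    	s = spans[p>>20] // 1 MiB chunks in this toy layout
    	if s == nil {
    		return 0, nil // non-heap pointer: C memory, stack, garbage
    	}
    	// Round p down to the start of its object within the span.
    	return p - (p-s.base)%s.elemSize, s
    }

    func main() {
    	base, _ := findObject(2<<20 + 100)
    	fmt.Printf("%#x\n", base) // 0x200040: object base in a mapped span
    	base, _ = findObject(7 << 20)
    	fmt.Printf("%#x\n", base) // 0x0: hole in the sparse heap
    }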
@@ -488,8 +488,17 @@ func dumpparams() {
 		dumpbool(true) // big-endian ptrs
 	}
 	dumpint(sys.PtrSize)
-	dumpint(uint64(mheap_.arena_start))
-	dumpint(uint64(mheap_.arena_used))
+	var arenaStart, arenaEnd uintptr
+	for i, ha := range mheap_.arenas {
+		if ha != nil {
+			if arenaStart == 0 {
+				arenaStart = uintptr(i) * heapArenaBytes
+			}
+			arenaEnd = uintptr(i+1) * heapArenaBytes
+		}
+	}
+	dumpint(uint64(arenaStart))
+	dumpint(uint64(arenaEnd))
 	dumpstr(sys.GOARCH)
 	dumpstr(sys.Goexperiment)
 	dumpint(uint64(ncpu))
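The heap dump format still wants a single [start, end) pair, so
dumpparams now derives conservative bounds from the sparse array rather
than reading arena_start/arena_used. A standalone sketch of the same
scan, with an invented arena size and a plain bool slice standing in for
the *heapArena entries:

    package main

    import "fmt"

    const heapArenaBytes = 1 << 22 // assumed arena size for the sketch

    // arenaBounds mirrors the new dumpparams loop: walk the per-arena
    // entries and report the lowest and highest mapped addresses. Holes
    // between mapped arenas fall inside the range, so the bounds are
    // conservative for a sparse heap.
    func arenaBounds(arenas []bool) (start, end uintptr) {
    	for i, mapped := range arenas {
    		if mapped {
    			if start == 0 {
    				start = uintptr(i) * heapArenaBytes
    			}
    			end = uintptr(i+1) * heapArenaBytes
    		}
    	}
    	return
    }

    func main() {
    	// Arenas 2 and 5 are mapped; 3 and 4 are holes inside the bounds.
    	arenas := []bool{false, false, true, false, false, true}
    	start, end := arenaBounds(arenas)
    	fmt.Printf("[%#x, %#x)\n", start, end) // [0x800000, 0x1800000)
    }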
@@ -862,7 +862,7 @@ func largeAlloc(size uintptr, needzero bool, noscan bool) *mspan {
 		throw("out of memory")
 	}
 	s.limit = s.base() + size
-	heapBitsForSpan(s.base()).initSpan(s)
+	heapBitsForAddr(s.base()).initSpan(s)
 	return s
 }
 
@@ -308,9 +308,6 @@ func (m markBits) clearMarked() {
 
 // markBitsForSpan returns the markBits for the span base address base.
 func markBitsForSpan(base uintptr) (mbits markBits) {
-	if base < mheap_.arena_start || base >= mheap_.arena_used {
-		throw("markBitsForSpan: base out of range")
-	}
 	mbits = markBitsForAddr(base)
 	if mbits.mask != 1 {
 		throw("markBitsForSpan: unaligned start")
@@ -352,15 +349,6 @@ func heapBitsForAddr(addr uintptr) heapBits {
 	return heapBits{bitp, uint32(off & 3), uint32(arena), last}
 }
 
-// heapBitsForSpan returns the heapBits for the span base address base.
-func heapBitsForSpan(base uintptr) (hbits heapBits) {
-	if base < mheap_.arena_start || base >= mheap_.arena_used {
-		print("runtime: base ", hex(base), " not in range [", hex(mheap_.arena_start), ",", hex(mheap_.arena_used), ")\n")
-		throw("heapBitsForSpan: base out of range")
-	}
-	return heapBitsForAddr(base)
-}
-
 // findObject returns the base address for the heap object containing
 // the address p, the object's span, and the index of the object in s.
 // If p does not point into a heap object, it returns base == 0.
@@ -237,6 +237,6 @@ func (c *mcentral) grow() *mspan {
 	p := s.base()
 	s.limit = p + size*n
 
-	heapBitsForSpan(s.base()).initSpan(s)
+	heapBitsForAddr(s.base()).initSpan(s)
 	return s
 }
@@ -1085,9 +1085,6 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
 	b := b0
 	n := n0
 
-	arena_start := mheap_.arena_start
-	arena_used := mheap_.arena_used
-
 	for i := uintptr(0); i < n; {
 		// Find bits for the next word.
 		bits := uint32(*addb(ptrmask, i/(sys.PtrSize*8)))
@@ -1099,7 +1096,7 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
 		if bits&1 != 0 {
 			// Same work as in scanobject; see comments there.
 			obj := *(*uintptr)(unsafe.Pointer(b + i))
-			if obj != 0 && arena_start <= obj && obj < arena_used {
+			if obj != 0 {
 				if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
 					greyobject(obj, b, i, span, gcw, objIndex)
 				}
@@ -1118,18 +1115,6 @@ func scanblock(b0, n0 uintptr, ptrmask *uint8, gcw *gcWork) {
 //
 //go:nowritebarrier
 func scanobject(b uintptr, gcw *gcWork) {
-	// Note that arena_used may change concurrently during
-	// scanobject and hence scanobject may encounter a pointer to
-	// a newly allocated heap object that is *not* in
-	// [start,used). It will not mark this object; however, we
-	// know that it was just installed by a mutator, which means
-	// that mutator will execute a write barrier and take care of
-	// marking it. This is even more pronounced on relaxed memory
-	// architectures since we access arena_used without barriers
-	// or synchronization, but the same logic applies.
-	arena_start := mheap_.arena_start
-	arena_used := mheap_.arena_used
-
 	// Find the bits for b and the size of the object at b.
 	//
 	// b is either the beginning of an object, in which case this
@@ -1203,9 +1188,17 @@ func scanobject(b uintptr, gcw *gcWork) {
 		obj := *(*uintptr)(unsafe.Pointer(b + i))
 
 		// At this point we have extracted the next potential pointer.
-		// Check if it points into heap and not back at the current object.
-		if obj != 0 && arena_start <= obj && obj < arena_used && obj-b >= n {
-			// Mark the object.
+		// Quickly filter out nil and pointers back to the current object.
+		if obj != 0 && obj-b >= n {
+			// Test if obj points into the Go heap and, if so,
+			// mark the object.
+			//
+			// Note that it's possible for findObject to
+			// fail if obj points to a just-allocated heap
+			// object because of a race with growing the
+			// heap. In this case, we know the object was
+			// just allocated and hence will be marked by
+			// allocation itself.
 			if obj, span, objIndex := findObject(obj, b, i); obj != 0 {
 				greyobject(obj, b, i, span, gcw, objIndex)
 			}
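The surviving fast-path test, obj != 0 && obj-b >= n, does two jobs with
one unsigned comparison: because obj-b wraps around when obj < b, the
single test rejects any pointer into the current object [b, b+n) from
either direction. A small demonstration with arbitrary addresses:

    package main

    import "fmt"

    // outsideObject mirrors the filter kept in scanobject: with unsigned
    // arithmetic, obj-b >= n holds exactly when obj lies outside the
    // current object [b, b+n), since obj < b wraps to a huge value.
    func outsideObject(obj, b, n uintptr) bool {
    	return obj != 0 && obj-b >= n
    }

    func main() {
    	b, n := uintptr(0x1000), uintptr(0x100) // object occupies [0x1000, 0x1100)
    	fmt.Println(outsideObject(0x1080, b, n)) // false: points into the object itself
    	fmt.Println(outsideObject(0x2000, b, n)) // true: candidate pointer above it
    	fmt.Println(outsideObject(0x0800, b, n)) // true: below b, difference wraps past n
    	fmt.Println(outsideObject(0, b, n))      // false: nil filtered out
    }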
@@ -1305,10 +1298,6 @@ func greyobject(obj, base, off uintptr, span *mspan, gcw *gcWork, objIndex uintp
 // gcDumpObject dumps the contents of obj for debugging and marks the
 // field at byte offset off in obj.
 func gcDumpObject(label string, obj, off uintptr) {
-	if obj < mheap_.arena_start || obj >= mheap_.arena_used {
-		print(label, "=", hex(obj), " is not in the Go heap\n")
-		return
-	}
 	s := spanOf(obj)
 	print(label, "=", hex(obj))
 	if s == nil {
@@ -1421,7 +1410,7 @@ func initCheckmarks() {
 	useCheckmark = true
 	for _, s := range mheap_.allspans {
 		if s.state == _MSpanInUse {
-			heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
+			heapBitsForAddr(s.base()).initCheckmarkSpan(s.layout())
 		}
 	}
 }
@@ -1430,7 +1419,7 @@ func clearCheckmarks() {
 	useCheckmark = false
 	for _, s := range mheap_.allspans {
 		if s.state == _MSpanInUse {
-			heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())
+			heapBitsForAddr(s.base()).clearCheckmarkSpan(s.layout())
 		}
 	}
 }
@@ -232,9 +232,8 @@ func wbBufFlush1(_p_ *p) {
 	// un-shaded stacks and flush after each stack scan.
 	gcw := &_p_.gcw
 	pos := 0
-	arenaStart := mheap_.arena_start
 	for _, ptr := range ptrs {
-		if ptr < arenaStart {
+		if ptr < minLegalPointer {
 			// nil pointers are very common, especially
 			// for the "old" values. Filter out these and
 			// other "obvious" non-heap pointers ASAP.
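minLegalPointer is the runtime's constant for the smallest address a
legal pointer can take (4096, one OS page), so the buffer flush can
discard nil and other tiny values without knowing where the heap lives.
A rough sketch of the filtering shape; the slice-returning helper is
invented for illustration, while the real wbBufFlush1 compacts the ptrs
buffer in place and then verifies survivors with findObject:

    package main

    import "fmt"

    // No legal Go pointer can fall in the first page, so anything below
    // this is dropped before any heavier per-pointer work.
    const minLegalPointer uintptr = 4096

    // filterPtrs keeps only values that could plausibly be heap pointers.
    // It reuses the input's backing array, echoing the in-place compaction
    // in the real flush loop.
    func filterPtrs(ptrs []uintptr) []uintptr {
    	kept := ptrs[:0]
    	for _, ptr := range ptrs {
    		if ptr < minLegalPointer {
    			continue // nil and other tiny values are very common
    		}
    		kept = append(kept, ptr)
    	}
    	return kept
    }

    func main() {
    	fmt.Printf("%#x\n", filterPtrs([]uintptr{0, 1, 4095, 4096, 0xc000012345}))
    	// [0x1000 0xc000012345]: everything below one page is dropped
    }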