runtime: replace mlookup and findObject with heapBitsForObject
These functions all serve essentially the same purpose. mlookup is
used in only one place and findObject in only three. Use
heapBitsForObject instead, which is the most optimized implementation.

(This may seem slightly silly because none of these uses care about
the heap bits, but we're about to split up the functionality of
heapBitsForObject anyway. At that point, findObject will rise from
the ashes.)

Change-Id: I906468c972be095dd23cf2404a7d4434e802f250
Reviewed-on: https://go-review.googlesource.com/85877
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent b1d94c118f
commit 41e6abdc61
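For context, the pattern the change settles on can be illustrated with a minimal, self-contained Go sketch. This is not the runtime's actual code: span, heapBits, and lookupObject below are hypothetical stand-ins for the internal mspan, heapBits, and heapBitsForObject. The sketch shows why one consolidated lookup works for every call site in the diff below: the lookup returns everything (object base, bitmap cursor, span, object index), and callers that only need part of it discard the rest with blank identifiers, as the updated SetFinalizer, addfinalizer, getgcmask, and raceSymbolizeData do.

// Minimal, illustrative sketch only. span, heapBits, and lookupObject are
// hypothetical stand-ins for the runtime's internal mspan, heapBits, and
// heapBitsForObject; they are simplified and not the real implementation.
package main

import "fmt"

// span describes a run of equally sized objects, like a simplified mspan.
type span struct {
	start    uintptr // address of the first object
	nelems   uintptr // number of objects in the span
	elemsize uintptr // size of each object in bytes
}

// heapBits is a placeholder for the per-object pointer-bitmap cursor.
type heapBits struct{ addr uintptr }

// lookupObject resolves an interior pointer to the object containing it,
// returning the object's base address, a bitmap cursor, the span, and the
// object's index within the span. base == 0 means p is not covered by any
// span, mirroring how heapBitsForObject reports "not a heap pointer".
func lookupObject(p uintptr, spans []span) (base uintptr, hbits heapBits, s *span, objIndex uintptr) {
	for i := range spans {
		sp := &spans[i]
		if p >= sp.start && p < sp.start+sp.nelems*sp.elemsize {
			objIndex = (p - sp.start) / sp.elemsize
			base = sp.start + objIndex*sp.elemsize // round down to the object's start
			return base, heapBits{addr: base}, sp, objIndex
		}
	}
	return 0, heapBits{}, nil, 0
}

func main() {
	spans := []span{{start: 0x1000, nelems: 8, elemsize: 64}}

	// A caller that only wants the object's base, like SetFinalizer:
	base, _, _, _ := lookupObject(0x1050, spans)
	fmt.Printf("base = %#x\n", base) // base = 0x1040

	// A caller that also wants the object's size, like raceSymbolizeData:
	base, _, s, _ := lookupObject(0x1105, spans)
	if s != nil {
		fmt.Printf("base = %#x size = %d\n", base, s.elemsize) // base = 0x1100 size = 64
	}
}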
@@ -48,7 +48,7 @@ func TestMemStats(t *testing.T) {
 	// PauseTotalNs can be 0 if timer resolution is poor.
 	fields := map[string][]func(interface{}) error{
 		"Alloc": {nz, le(1e10)}, "TotalAlloc": {nz, le(1e11)}, "Sys": {nz, le(1e10)},
-		"Lookups": {nz, le(1e10)}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
+		"Lookups": {eq(uint64(0))}, "Mallocs": {nz, le(1e10)}, "Frees": {nz, le(1e10)},
 		"HeapAlloc": {nz, le(1e10)}, "HeapSys": {nz, le(1e10)}, "HeapIdle": {le(1e10)},
 		"HeapInuse": {nz, le(1e10)}, "HeapReleased": {le(1e10)}, "HeapObjects": {nz, le(1e10)},
 		"StackInuse": {nz, le(1e10)}, "StackSys": {nz, le(1e10)},
@@ -1852,12 +1852,10 @@ func getgcmask(ep interface{}) (mask []byte) {
 	}
 
 	// heap
-	var n uintptr
-	var base uintptr
-	if mlookup(uintptr(p), &base, &n, nil) != 0 {
+	if base, hbits, s, _ := heapBitsForObject(uintptr(p), 0, 0); base != 0 {
+		n := s.elemsize
 		mask = make([]byte, n/sys.PtrSize)
 		for i := uintptr(0); i < n; i += sys.PtrSize {
-			hbits := heapBitsForAddr(base + i)
 			if hbits.isPointer() {
 				mask[i/sys.PtrSize] = 1
 			}
@@ -1865,6 +1863,7 @@ func getgcmask(ep interface{}) (mask []byte) {
 				mask = mask[:i/sys.PtrSize]
 				break
 			}
+			hbits = hbits.next()
 		}
 		return
 	}
@@ -39,7 +39,6 @@ type mcache struct {
 	stackcache [_NumStackOrders]stackfreelist
 
 	// Local allocator stats, flushed during GC.
-	local_nlookup    uintptr                  // number of pointer lookups
 	local_largefree  uintptr                  // bytes freed for large objects (>maxsmallsize)
 	local_nlargefree uintptr                  // number of frees for large objects (>maxsmallsize)
 	local_nsmallfree [_NumSizeClasses]uintptr // number of frees for small objects (<=maxsmallsize)
@@ -326,9 +326,9 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
 	}
 
 	// find the containing object
-	_, base, _ := findObject(e.data)
+	base, _, _, _ := heapBitsForObject(uintptr(e.data), 0, 0)
 
-	if base == nil {
+	if base == 0 {
 		// 0-length objects are okay.
 		if e.data == unsafe.Pointer(&zerobase) {
 			return
@@ -353,7 +353,7 @@ func SetFinalizer(obj interface{}, finalizer interface{}) {
 		throw("runtime.SetFinalizer: pointer not in allocated block")
 	}
 
-	if e.data != base {
+	if uintptr(e.data) != base {
 		// As an implementation detail we allow to set finalizers for an inner byte
 		// of an object if it could come from tiny alloc (see mallocgc for details).
 		if ot.elem == nil || ot.elem.kind&kindNoPointers == 0 || ot.elem.size >= maxTinySize {
@@ -421,46 +421,6 @@ okarg:
 	})
 }
 
-// Look up pointer v in heap. Return the span containing the object,
-// the start of the object, and the size of the object. If the object
-// does not exist, return nil, nil, 0.
-func findObject(v unsafe.Pointer) (s *mspan, x unsafe.Pointer, n uintptr) {
-	c := gomcache()
-	c.local_nlookup++
-	if sys.PtrSize == 4 && c.local_nlookup >= 1<<30 {
-		// purge cache stats to prevent overflow
-		lock(&mheap_.lock)
-		purgecachedstats(c)
-		unlock(&mheap_.lock)
-	}
-
-	// find span
-	arena_start := mheap_.arena_start
-	arena_used := mheap_.arena_used
-	if uintptr(v) < arena_start || uintptr(v) >= arena_used {
-		return
-	}
-	p := uintptr(v) >> pageShift
-	q := p - arena_start>>pageShift
-	s = mheap_.spans[q]
-	if s == nil {
-		return
-	}
-	x = unsafe.Pointer(s.base())
-
-	if uintptr(v) < uintptr(x) || uintptr(v) >= uintptr(unsafe.Pointer(s.limit)) || s.state != mSpanInUse {
-		s = nil
-		x = nil
-		return
-	}
-
-	n = s.elemsize
-	if s.spanclass.sizeclass() != 0 {
-		x = add(x, (uintptr(v)-uintptr(x))/n*n)
-	}
-	return
-}
-
 // Mark KeepAlive as noinline so that it is easily detectable as an intrinsic.
 //go:noinline
 
@@ -442,55 +442,6 @@ func spanOfUnchecked(p uintptr) *mspan {
 	return mheap_.spans[(p-mheap_.arena_start)>>_PageShift]
 }
 
-func mlookup(v uintptr, base *uintptr, size *uintptr, sp **mspan) int32 {
-	_g_ := getg()
-
-	_g_.m.mcache.local_nlookup++
-	if sys.PtrSize == 4 && _g_.m.mcache.local_nlookup >= 1<<30 {
-		// purge cache stats to prevent overflow
-		lock(&mheap_.lock)
-		purgecachedstats(_g_.m.mcache)
-		unlock(&mheap_.lock)
-	}
-
-	s := mheap_.lookupMaybe(unsafe.Pointer(v))
-	if sp != nil {
-		*sp = s
-	}
-	if s == nil {
-		if base != nil {
-			*base = 0
-		}
-		if size != nil {
-			*size = 0
-		}
-		return 0
-	}
-
-	p := s.base()
-	if s.spanclass.sizeclass() == 0 {
-		// Large object.
-		if base != nil {
-			*base = p
-		}
-		if size != nil {
-			*size = s.npages << _PageShift
-		}
-		return 1
-	}
-
-	n := s.elemsize
-	if base != nil {
-		i := (v - p) / n
-		*base = p + i*n
-	}
-	if size != nil {
-		*size = n
-	}
-
-	return 1
-}
-
 // Initialize the heap.
 func (h *mheap) init(spansStart, spansBytes uintptr) {
 	h.treapalloc.init(unsafe.Sizeof(treapNode{}), nil, nil, &memstats.other_sys)
@@ -1459,12 +1410,12 @@ func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *p
 	// situation where it's possible that markrootSpans
 	// has already run but mark termination hasn't yet.
 	if gcphase != _GCoff {
-		_, base, _ := findObject(p)
+		base, _, _, _ := heapBitsForObject(uintptr(p), 0, 0)
 		mp := acquirem()
 		gcw := &mp.p.ptr().gcw
 		// Mark everything reachable from the object
 		// so it's retained for the finalizer.
-		scanobject(uintptr(base), gcw)
+		scanobject(base, gcw)
 		// Mark the finalizer itself, since the
 		// special isn't part of the GC'd heap.
 		scanblock(uintptr(unsafe.Pointer(&s.fn)), sys.PtrSize, &oneptrmask[0], gcw)
@@ -26,7 +26,7 @@ type mstats struct {
 	alloc       uint64 // bytes allocated and not yet freed
 	total_alloc uint64 // bytes allocated (even if freed)
 	sys         uint64 // bytes obtained from system (should be sum of xxx_sys below, no locking, approximate)
-	nlookup     uint64 // number of pointer lookups
+	nlookup     uint64 // number of pointer lookups (unused)
 	nmalloc     uint64 // number of mallocs
 	nfree       uint64 // number of frees
 
@@ -638,8 +638,6 @@ func purgecachedstats(c *mcache) {
 	c.local_scan = 0
 	memstats.tinyallocs += uint64(c.local_tinyallocs)
 	c.local_tinyallocs = 0
-	memstats.nlookup += uint64(c.local_nlookup)
-	c.local_nlookup = 0
 	h.largefree += uint64(c.local_largefree)
 	c.local_largefree = 0
 	h.nlargefree += uint64(c.local_nlargefree)
@@ -187,10 +187,10 @@ type symbolizeDataContext struct {
 }
 
 func raceSymbolizeData(ctx *symbolizeDataContext) {
-	if _, x, n := findObject(unsafe.Pointer(ctx.addr)); x != nil {
+	if base, _, span, _ := heapBitsForObject(ctx.addr, 0, 0); base != 0 {
 		ctx.heap = 1
-		ctx.start = uintptr(x)
-		ctx.size = n
+		ctx.start = base
+		ctx.size = span.elemsize
 		ctx.res = 1
 	}
 }