
runtime: make the span allocation purpose more explicit

This change modifies mheap's span allocation API to have each caller
declare a purpose, defined as a new enum called spanAllocType.

The purpose behind this change is two-fold:
1. Tight control over who gets to allocate heap memory is, generally
   speaking, a good thing. Every codepath that allocates heap memory
   places additional implicit restrictions on the allocator. A notable
   example of a restriction is work bufs coming from heap memory: write
   barriers are not allowed in allocation paths because then we could
   have a situation where the allocator calls into the allocator.
2. Memory statistic updating is explicit. Instead of passing an opaque
   pointer for statistic updating, which places restrictions on how that
   statistic may be updated, we use the spanAllocType to determine which
   statistic to update and how.

We also take this opportunity to group all the statistic updating code
together, which should make the accounting code a little easier to
follow.

Change-Id: Ic0b0898959ba2a776f67122f0e36c9d7d60e3085
Reviewed-on: https://go-review.googlesource.com/c/go/+/246970
Trust: Michael Knyszek <mknyszek@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Pratt <mpratt@google.com>
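
In caller-facing terms, the change swaps an opaque statistic pointer for a declared purpose and moves the decision of which statistic to update into mheap itself. Below is a minimal, self-contained sketch of that shape, written for this summary: the spanAllocType names match the diff further down, while the stats struct and accountAlloc helper are simplified stand-ins for memstats and allocSpan's accounting, not code from the runtime.

package main

import "fmt"

// spanAllocType mirrors the enum introduced by this change: every span
// allocation declares its purpose instead of passing a stat pointer.
type spanAllocType uint8

const (
	spanAllocHeap          spanAllocType = iota // heap span
	spanAllocStack                              // stack span
	spanAllocPtrScalarBits                      // unrolled GC prog bitmap span
	spanAllocWorkBuf                            // work buf span
)

// manual reports whether the allocation is manually managed, i.e. anything
// other than a GC'd heap span.
func (t spanAllocType) manual() bool { return t != spanAllocHeap }

// stats is a simplified stand-in for the memstats fields this change
// routes updates into.
var stats struct {
	heapInuse, stacksInuse, gcSys uint64
}

// accountAlloc shows the centralized accounting switch that allocSpan now
// performs instead of incrementing a caller-supplied *uint64.
func accountAlloc(typ spanAllocType, nbytes uint64) {
	switch typ {
	case spanAllocHeap:
		stats.heapInuse += nbytes
	case spanAllocStack:
		stats.stacksInuse += nbytes
	case spanAllocPtrScalarBits, spanAllocWorkBuf:
		stats.gcSys += nbytes
	}
}

func main() {
	// Old shape (conceptually): allocManual(npages, &memstats.gc_sys)
	// New shape:                allocManual(npages, spanAllocWorkBuf)
	accountAlloc(spanAllocWorkBuf, 8<<10)
	fmt.Println(stats.gcSys, spanAllocWorkBuf.manual()) // 8192 true
}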
Author: Michael Anthony Knyszek, 2020-07-29 19:00:37 +00:00 (committed by Michael Knyszek)
parent c863849800
commit dc02578ac8
4 changed files with 68 additions and 30 deletions

src/runtime/mbitmap.go

@@ -1868,12 +1868,12 @@ func materializeGCProg(ptrdata uintptr, prog *byte) *mspan {
bitmapBytes := divRoundUp(ptrdata, 8*sys.PtrSize)
// Compute the number of pages needed for bitmapBytes.
pages := divRoundUp(bitmapBytes, pageSize)
s := mheap_.allocManual(pages, &memstats.gc_sys)
s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
runGCProg(addb(prog, 4), nil, (*byte)(unsafe.Pointer(s.startAddr)), 1)
return s
}
func dematerializeGCProg(s *mspan) {
mheap_.freeManual(s, &memstats.gc_sys)
mheap_.freeManual(s, spanAllocPtrScalarBits)
}
func dumpGCProg(p *byte) {

src/runtime/mgcwork.go

@@ -371,7 +371,7 @@ func getempty() *workbuf {
}
if s == nil {
systemstack(func() {
s = mheap_.allocManual(workbufAlloc/pageSize, &memstats.gc_sys)
s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
})
if s == nil {
throw("out of memory")
@@ -473,7 +473,7 @@ func freeSomeWbufs(preemptible bool) bool {
break
}
work.wbufSpans.free.remove(span)
mheap_.freeManual(span, &memstats.gc_sys)
mheap_.freeManual(span, spanAllocWorkBuf)
}
})
more := !work.wbufSpans.free.isEmpty()

src/runtime/mheap.go

@@ -861,6 +861,22 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
return nFreed
}
// spanAllocType represents the type of allocation to make, or
// the type of allocation to be freed.
type spanAllocType uint8
const (
spanAllocHeap spanAllocType = iota // heap span
spanAllocStack // stack span
spanAllocPtrScalarBits // unrolled GC prog bitmap span
spanAllocWorkBuf // work buf span
)
// manual returns true if the span allocation is manually managed.
func (s spanAllocType) manual() bool {
return s != spanAllocHeap
}
// alloc allocates a new span of npage pages from the GC'd heap.
//
// spanclass indicates the span's size class and scannability.
@@ -877,7 +893,7 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
if h.sweepdone == 0 {
h.reclaim(npages)
}
s = h.allocSpan(npages, false, spanclass, &memstats.heap_inuse)
s = h.allocSpan(npages, spanAllocHeap, spanclass)
})
if s != nil {
@@ -902,9 +918,15 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
// allocManual must be called on the system stack because it may
// acquire the heap lock via allocSpan. See mheap for details.
//
// If new code is written to call allocManual, do NOT use an
// existing spanAllocType value and instead declare a new one.
//
//go:systemstack
func (h *mheap) allocManual(npages uintptr, stat *uint64) *mspan {
return h.allocSpan(npages, true, 0, stat)
func (h *mheap) allocManual(npages uintptr, typ spanAllocType) *mspan {
if !typ.manual() {
throw("manual span allocation called with non-manually-managed type")
}
return h.allocSpan(npages, typ, 0)
}
// setSpans modifies the span map so [spanOf(base), spanOf(base+npage*pageSize))
@@ -1066,7 +1088,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
// allocSpan allocates an mspan which owns npages worth of memory.
//
// If manual == false, allocSpan allocates a heap span of class spanclass
// If typ.manual() == false, allocSpan allocates a heap span of class spanclass
// and updates heap accounting. If manual == true, allocSpan allocates a
// manually-managed span (spanclass is ignored), and the caller is
// responsible for any accounting related to its use of the span. Either
@@ -1081,7 +1103,7 @@ func (h *mheap) freeMSpanLocked(s *mspan) {
// the heap lock and because it must block GC transitions.
//
//go:systemstack
func (h *mheap) allocSpan(npages uintptr, manual bool, spanclass spanClass, sysStat *uint64) (s *mspan) {
func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) {
// Function-global state.
gp := getg()
base, scav := uintptr(0), uintptr(0)
@@ -1143,12 +1165,10 @@ HaveSpan:
s.needzero = 1
}
nbytes := npages * pageSize
if manual {
if typ.manual() {
s.manualFreeList = 0
s.nelems = 0
s.limit = s.base() + s.npages*pageSize
// Manually managed memory doesn't count toward heap_sys.
mSysStatDec(&memstats.heap_sys, s.npages*pageSize)
s.state.set(mSpanManual)
} else {
// We must set span properties before the span is published anywhere
@@ -1205,7 +1225,18 @@ HaveSpan:
mSysStatDec(&memstats.heap_released, scav)
}
// Update stats.
mSysStatInc(sysStat, nbytes)
switch typ {
case spanAllocHeap:
mSysStatInc(&memstats.heap_inuse, nbytes)
case spanAllocStack:
mSysStatInc(&memstats.stacks_inuse, nbytes)
case spanAllocPtrScalarBits, spanAllocWorkBuf:
mSysStatInc(&memstats.gc_sys, nbytes)
}
if typ.manual() {
// Manually managed memory doesn't count toward heap_sys.
mSysStatDec(&memstats.heap_sys, nbytes)
}
mSysStatDec(&memstats.heap_idle, nbytes)
// Publish the span in various locations.
@@ -1217,7 +1248,7 @@
// before that happens) or pageInUse is updated.
h.setSpans(s.base(), npages, s)
if !manual {
if !typ.manual() {
// Mark in-use span in arena page bitmap.
//
// This publishes the span to the page sweeper, so
@@ -1323,13 +1354,13 @@ func (h *mheap) freeSpan(s *mspan) {
bytes := s.npages << _PageShift
msanfree(base, bytes)
}
h.freeSpanLocked(s, true, true)
h.freeSpanLocked(s, spanAllocHeap)
unlock(&h.lock)
})
}
// freeManual frees a manually-managed span returned by allocManual.
// stat must be the same as the stat passed to the allocManual that
// typ must be the same as the spanAllocType passed to the allocManual that
// allocated s.
//
// This must only be called when gcphase == _GCoff. See mSpanState for
@@ -1339,16 +1370,14 @@ func (h *mheap) freeSpan(s *mspan) {
// the heap lock. See mheap for details.
//
//go:systemstack
func (h *mheap) freeManual(s *mspan, stat *uint64) {
func (h *mheap) freeManual(s *mspan, typ spanAllocType) {
s.needzero = 1
lock(&h.lock)
mSysStatDec(stat, s.npages*pageSize)
mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
h.freeSpanLocked(s, false, true)
h.freeSpanLocked(s, typ)
unlock(&h.lock)
}
func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
switch s.state.get() {
case mSpanManual:
if s.allocCount != 0 {
@@ -1368,12 +1397,21 @@ func (h *mheap) freeSpanLocked(s *mspan, acctinuse, acctidle bool) {
throw("mheap.freeSpanLocked - invalid span state")
}
if acctinuse {
// Update stats.
//
// Mirrors the code in allocSpan.
switch typ {
case spanAllocHeap:
mSysStatDec(&memstats.heap_inuse, s.npages*pageSize)
case spanAllocStack:
mSysStatDec(&memstats.stacks_inuse, s.npages*pageSize)
case spanAllocPtrScalarBits, spanAllocWorkBuf:
mSysStatDec(&memstats.gc_sys, s.npages*pageSize)
}
if acctidle {
mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
if typ.manual() {
mSysStatInc(&memstats.heap_sys, s.npages*pageSize)
}
mSysStatInc(&memstats.heap_idle, s.npages*pageSize)
// Mark the space as free.
h.pages.free(s.base(), s.npages)
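
The new doc comment on allocManual above asks any future caller to declare a fresh spanAllocType rather than reuse an existing one. A hypothetical sketch of what that would look like (spanAllocDebugLog is invented purely for illustration and is not part of this change):

package main

// spanAllocType and the existing values are restated here only so the
// sketch stands alone; they correspond to the declarations in the diff.
type spanAllocType uint8

const (
	spanAllocHeap spanAllocType = iota
	spanAllocStack
	spanAllocPtrScalarBits
	spanAllocWorkBuf
	spanAllocDebugLog // hypothetical new purpose, declared by the new caller
)

func (t spanAllocType) manual() bool { return t != spanAllocHeap }

func main() {
	// The accounting switches in allocSpan and freeSpanLocked would then
	// gain a matching case for spanAllocDebugLog, so its memory stays
	// visible under its own statistic instead of being folded into another.
	_ = spanAllocDebugLog.manual() // true: still a manually-managed span
}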

src/runtime/stack.go

@@ -187,7 +187,7 @@ func stackpoolalloc(order uint8) gclinkptr {
lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
if s == nil {
// no free stacks. Allocate another span worth.
s = mheap_.allocManual(_StackCacheSize>>_PageShift, &memstats.stacks_inuse)
s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
if s == nil {
throw("out of memory")
}
@@ -251,7 +251,7 @@ func stackpoolfree(x gclinkptr, order uint8) {
stackpool[order].item.span.remove(s)
s.manualFreeList = 0
osStackFree(s)
mheap_.freeManual(s, &memstats.stacks_inuse)
mheap_.freeManual(s, spanAllocStack)
}
}
@@ -396,7 +396,7 @@ func stackalloc(n uint32) stack {
if s == nil {
// Allocate a new stack from the heap.
s = mheap_.allocManual(npage, &memstats.stacks_inuse)
s = mheap_.allocManual(npage, spanAllocStack)
if s == nil {
throw("out of memory")
}
@@ -480,7 +480,7 @@ func stackfree(stk stack) {
// Free the stack immediately if we're
// sweeping.
osStackFree(s)
mheap_.freeManual(s, &memstats.stacks_inuse)
mheap_.freeManual(s, spanAllocStack)
} else {
// If the GC is running, we can't return a
// stack span to the heap because it could be
@@ -1193,7 +1193,7 @@ func freeStackSpans() {
list.remove(s)
s.manualFreeList = 0
osStackFree(s)
mheap_.freeManual(s, &memstats.stacks_inuse)
mheap_.freeManual(s, spanAllocStack)
}
s = next
}
@@ -1207,7 +1207,7 @@ func freeStackSpans() {
next := s.next
stackLarge.free[i].remove(s)
osStackFree(s)
mheap_.freeManual(s, &memstats.stacks_inuse)
mheap_.freeManual(s, spanAllocStack)
s = next
}
}