runtime: break down memstats.gc_sys

This change breaks gc_sys apart into three distinct pieces. Two of those
pieces come out of heap_sys, since they're allocated from the page heap.
The rest comes from memory mapped via e.g. persistentalloc, which better
fits the purpose of a sysMemStat. Also, rename gc_sys to gcMiscSys.

Change-Id: I098789170052511e7b31edbcdc9a53e5c24573f7
Reviewed-on: https://go-review.googlesource.com/c/go/+/246973
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Trust: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Michael Pratt <mpratt@google.com>
Michael Anthony Knyszek authored 2020-08-03 19:23:30 +00:00, committed by Michael Knyszek
parent 39e335ac06
commit ad863ba32a
7 changed files with 39 additions and 27 deletions
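
For context, a sysMemStat in this era of the runtime is just an atomically updated counter of bytes obtained from the OS. Below is a minimal standalone sketch of the idea, using sync/atomic in place of the runtime's internal atomics; the names mirror the runtime's, but this is a simplified model under those assumptions, not the actual implementation in runtime/mstats.go.

package main

import "sync/atomic"

// sysMemStat models a statistic about memory obtained from the OS.
type sysMemStat uint64

// load atomically reads the current value.
func (s *sysMemStat) load() uint64 {
	return atomic.LoadUint64((*uint64)(s))
}

// add atomically adds n (which may be negative) to the statistic.
// Adding uint64(n) relies on two's-complement wraparound, the
// documented way to subtract with sync/atomic.
func (s *sysMemStat) add(n int64) {
	val := atomic.AddUint64((*uint64)(s), uint64(n))
	if n < 0 && int64(val) < 0 {
		panic("sysMemStat underflow")
	}
}

func main() {
	var stat sysMemStat
	stat.add(4096)
	stat.add(-1024)
	println(stat.load()) // 3072
}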

src/runtime/heapdump.go

@@ -540,6 +540,9 @@ func dumpms() {
 }
 
 func dumpmemstats() {
+	// These ints should be identical to the exported
+	// MemStats structure and should be ordered the same
+	// way too.
 	dumpint(tagMemStats)
 	dumpint(memstats.alloc)
 	dumpint(memstats.total_alloc)
@@ -560,7 +563,7 @@ func dumpmemstats() {
 	dumpint(memstats.mcache_inuse)
 	dumpint(memstats.mcache_sys.load())
 	dumpint(memstats.buckhash_sys.load())
-	dumpint(memstats.gc_sys.load())
+	dumpint(memstats.gcMiscSys.load() + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse)
 	dumpint(memstats.other_sys.load())
 	dumpint(memstats.next_gc)
 	dumpint(memstats.last_gc_unix)

src/runtime/malloc.go

@@ -743,9 +743,9 @@ mapped:
 			throw("arena already initialized")
 		}
 		var r *heapArena
-		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
+		r = (*heapArena)(h.heapArenaAlloc.alloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
 		if r == nil {
-			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gc_sys))
+			r = (*heapArena)(persistentalloc(unsafe.Sizeof(*r), sys.PtrSize, &memstats.gcMiscSys))
 			if r == nil {
 				throw("out of memory allocating heap arena metadata")
 			}
@@ -757,7 +757,7 @@ mapped:
 			if size == 0 {
 				size = physPageSize
 			}
-			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gc_sys))
+			newArray := (*notInHeap)(persistentalloc(size, sys.PtrSize, &memstats.gcMiscSys))
 			if newArray == nil {
 				throw("out of memory allocating allArenas")
 			}

src/runtime/mcheckmark.go

@@ -41,7 +41,7 @@ func startCheckmarks() {
 
 		if bitmap == nil {
 			// Allocate bitmap on first use.
-			bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gc_sys))
+			bitmap = (*checkmarksMap)(persistentalloc(unsafe.Sizeof(*bitmap), 0, &memstats.gcMiscSys))
 			if bitmap == nil {
 				throw("out of memory allocating checkmarks bitmap")
 			}

src/runtime/mfinal.go

@@ -88,7 +88,7 @@ func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot
 	lock(&finlock)
 	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
 		if finc == nil {
-			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gc_sys))
+			finc = (*finblock)(persistentalloc(_FinBlockSize, 0, &memstats.gcMiscSys))
 			finc.alllink = allfin
 			allfin = finc
 			if finptrmask[0] == 0 {

src/runtime/mheap.go

@@ -713,7 +713,7 @@ func (h *mheap) init() {
 		h.central[i].mcentral.init(spanClass(i))
 	}
 
-	h.pages.init(&h.lock, &memstats.gc_sys)
+	h.pages.init(&h.lock, &memstats.gcMiscSys)
 }
 
 // reclaim sweeps and reclaims at least npage pages into the heap.
@@ -1230,8 +1230,10 @@ HaveSpan:
 		atomic.Xadd64(&memstats.heap_inuse, int64(nbytes))
 	case spanAllocStack:
 		atomic.Xadd64(&memstats.stacks_inuse, int64(nbytes))
-	case spanAllocPtrScalarBits, spanAllocWorkBuf:
-		memstats.gc_sys.add(int64(nbytes))
+	case spanAllocWorkBuf:
+		atomic.Xadd64(&memstats.gcWorkBufInUse, int64(nbytes))
+	case spanAllocPtrScalarBits:
+		atomic.Xadd64(&memstats.gcProgPtrScalarBitsInUse, int64(nbytes))
 	}
 	if typ.manual() {
 		// Manually managed memory doesn't count toward heap_sys.
@@ -1406,8 +1408,10 @@ func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) {
 		atomic.Xadd64(&memstats.heap_inuse, -int64(nbytes))
 	case spanAllocStack:
 		atomic.Xadd64(&memstats.stacks_inuse, -int64(nbytes))
-	case spanAllocPtrScalarBits, spanAllocWorkBuf:
-		memstats.gc_sys.add(-int64(nbytes))
+	case spanAllocWorkBuf:
+		atomic.Xadd64(&memstats.gcWorkBufInUse, -int64(nbytes))
+	case spanAllocPtrScalarBits:
+		atomic.Xadd64(&memstats.gcProgPtrScalarBitsInUse, -int64(nbytes))
 	}
 	if typ.manual() {
 		// Manually managed memory doesn't count toward heap_sys, so add it back.
@@ -1956,7 +1960,7 @@ func newArenaMayUnlock() *gcBitsArena {
 	var result *gcBitsArena
 	if gcBitsArenas.free == nil {
 		unlock(&gcBitsArenas.lock)
-		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gc_sys))
+		result = (*gcBitsArena)(sysAlloc(gcBitsChunkBytes, &memstats.gcMiscSys))
 		if result == nil {
 			throw("runtime: cannot allocate memory")
 		}
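
Note that the allocSpan and freeSpanLocked hunks above are symmetric: the same switch runs on both paths with the sign of nbytes flipped, so gcWorkBufInUse and gcProgPtrScalarBitsInUse net out to bytes currently in use rather than a running total. A small standalone sketch of that pattern follows; the type and field names echo the runtime's, but this is an illustrative model, not runtime code.

package main

import "sync/atomic"

type spanAllocType int

const (
	spanAllocHeap spanAllocType = iota
	spanAllocStack
	spanAllocWorkBuf
	spanAllocPtrScalarBits
)

type stats struct {
	heapInuse                uint64
	stacksInuse              uint64
	gcWorkBufInUse           uint64
	gcProgPtrScalarBitsInUse uint64
}

// account is called with delta = +nbytes on the allocation path and
// delta = -nbytes on the free path, so each counter tracks live bytes.
// uint64(delta) wraps for negative deltas, which is the documented
// sync/atomic way to subtract.
func (m *stats) account(typ spanAllocType, delta int64) {
	switch typ {
	case spanAllocHeap:
		atomic.AddUint64(&m.heapInuse, uint64(delta))
	case spanAllocStack:
		atomic.AddUint64(&m.stacksInuse, uint64(delta))
	case spanAllocWorkBuf:
		atomic.AddUint64(&m.gcWorkBufInUse, uint64(delta))
	case spanAllocPtrScalarBits:
		atomic.AddUint64(&m.gcProgPtrScalarBitsInUse, uint64(delta))
	}
}

func main() {
	var m stats
	m.account(spanAllocWorkBuf, 8192)  // alloc path
	m.account(spanAllocWorkBuf, -8192) // free path
	println(m.gcWorkBufInUse) // 0: counter reflects in-use bytes only
}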

src/runtime/mspanset.go

@@ -102,7 +102,7 @@ retry:
 			if newCap == 0 {
 				newCap = spanSetInitSpineCap
 			}
-			newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gc_sys)
+			newSpine := persistentalloc(newCap*sys.PtrSize, cpu.CacheLineSize, &memstats.gcMiscSys)
 			if b.spineCap != 0 {
 				// Blocks are allocated off-heap, so
 				// no write barriers.
@@ -283,7 +283,7 @@ func (p *spanSetBlockAlloc) alloc() *spanSetBlock {
 	if s := (*spanSetBlock)(p.stack.pop()); s != nil {
 		return s
 	}
-	return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gc_sys))
+	return (*spanSetBlock)(persistentalloc(unsafe.Sizeof(spanSetBlock{}), cpu.CacheLineSize, &memstats.gcMiscSys))
 }
 
 // free returns a spanSetBlock back to the pool.

src/runtime/mstats.go

@@ -44,15 +44,17 @@ type mstats struct {
 
 	// Statistics about allocation of low-level fixed-size structures.
 	// Protected by FixAlloc locks.
-	stacks_inuse uint64     // bytes in manually-managed stack spans; updated atomically or during STW
-	stacks_sys   sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
-	mspan_inuse  uint64     // mspan structures
-	mspan_sys    sysMemStat
-	mcache_inuse uint64 // mcache structures
-	mcache_sys   sysMemStat
-	buckhash_sys sysMemStat // profiling bucket hash table
-	gc_sys       sysMemStat // updated atomically or during STW
-	other_sys    sysMemStat // updated atomically or during STW
+	stacks_inuse             uint64     // bytes in manually-managed stack spans; updated atomically or during STW
+	stacks_sys               sysMemStat // only counts newosproc0 stack in mstats; differs from MemStats.StackSys
+	mspan_inuse              uint64     // mspan structures
+	mspan_sys                sysMemStat
+	mcache_inuse             uint64 // mcache structures
+	mcache_sys               sysMemStat
+	buckhash_sys             sysMemStat // profiling bucket hash table
+	gcWorkBufInUse           uint64     // updated atomically or during STW
+	gcProgPtrScalarBitsInUse uint64     // updated atomically or during STW
+	gcMiscSys                sysMemStat // updated atomically or during STW
+	other_sys                sysMemStat // updated atomically or during STW
 
 	// Statistics about the garbage collector.
@@ -472,7 +474,10 @@ func readmemstats_m(stats *MemStats) {
 	stats.MCacheInuse = memstats.mcache_inuse
 	stats.MCacheSys = memstats.mcache_sys.load()
 	stats.BuckHashSys = memstats.buckhash_sys.load()
-	stats.GCSys = memstats.gc_sys.load()
+	// MemStats defines GCSys as an aggregate of all memory related
+	// to the memory management system, but we track this memory
+	// at a more granular level in the runtime.
+	stats.GCSys = memstats.gcMiscSys.load() + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
 	stats.OtherSys = memstats.other_sys.load()
 	stats.NextGC = memstats.next_gc
 	stats.LastGC = memstats.last_gc_unix
@@ -557,11 +562,11 @@ func updatememstats() {
 	memstats.mcache_inuse = uint64(mheap_.cachealloc.inuse)
 	memstats.mspan_inuse = uint64(mheap_.spanalloc.inuse)
 	memstats.sys = memstats.heap_sys.load() + memstats.stacks_sys.load() + memstats.mspan_sys.load() +
-		memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gc_sys.load() +
+		memstats.mcache_sys.load() + memstats.buckhash_sys.load() + memstats.gcMiscSys.load() +
 		memstats.other_sys.load()
 
-	// We also count stacks_inuse as sys memory.
-	memstats.sys += memstats.stacks_inuse
+	// We also count stacks_inuse, gcWorkBufInUse, and gcProgPtrScalarBitsInUse as sys memory.
+	memstats.sys += memstats.stacks_inuse + memstats.gcWorkBufInUse + memstats.gcProgPtrScalarBitsInUse
 
 	// Calculate memory allocator stats.
 	// During program execution we only count number of frees and amount of freed memory.
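
Because readmemstats_m still reports GCSys as a single aggregate, user code observing MemStats is unaffected by the split. A quick way to see the aggregated value with the standard, stable runtime API (runnable as-is):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	var m runtime.MemStats
	runtime.ReadMemStats(&m)
	// GCSys is still a single exported number; after this change the
	// runtime computes it internally as
	// gcMiscSys + gcWorkBufInUse + gcProgPtrScalarBitsInUse.
	fmt.Printf("GCSys = %d bytes\n", m.GCSys)
	fmt.Printf("Sys   = %d bytes\n", m.Sys)
}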