mirror of
https://github.com/golang/go
synced 2024-09-28 18:14:29 -06:00
runtime: use span.elemsize for accounting in mallocgc
Currently the final size computed for an object in mallocgc excludes the allocation header. This is correct in a number of cases, but definitely wrong for memory profiling because the "free" side accounts for the full allocation slot. This change makes an explicit distinction between the parts of mallocgc that care about the full allocation slot size ("the GC's accounting") and those that don't (pointer+len should always be valid). It then applies the appropriate size to the different forms of accounting in mallocgc. For #64153. Change-Id: I481b34b2bb9ff923b59e8408ab2b8fb9025ba944 Reviewed-on: https://go-review.googlesource.com/c/go/+/542735 LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com> Auto-Submit: Michael Knyszek <mknyszek@google.com> Reviewed-by: Cherry Mui <cherryyz@google.com>
This commit is contained in:
parent
0b31a46f1f
commit
f67b2d8f0b
@ -589,7 +589,7 @@ func newUserArenaChunk() (unsafe.Pointer, *mspan) {
|
||||
// This may be racing with GC so do it atomically if there can be
|
||||
// a race marking the bit.
|
||||
if gcphase != _GCoff {
|
||||
gcmarknewobject(span, span.base(), span.elemsize)
|
||||
gcmarknewobject(span, span.base())
|
||||
}
|
||||
|
||||
if raceenabled {
|
||||
|
@ -1221,12 +1221,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
||||
// This may be racing with GC so do it atomically if there can be
|
||||
// a race marking the bit.
|
||||
if gcphase != _GCoff {
|
||||
// Pass the full size of the allocation to the number of bytes
|
||||
// marked.
|
||||
//
|
||||
// If !goexperiment.AllocHeaders, "size" doesn't include the
|
||||
// allocation header, so use span.elemsize unconditionally.
|
||||
gcmarknewobject(span, uintptr(x), span.elemsize)
|
||||
gcmarknewobject(span, uintptr(x))
|
||||
}
|
||||
|
||||
if raceenabled {
|
||||
@ -1248,12 +1243,28 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
||||
asanunpoison(x, userSize)
|
||||
}
|
||||
|
||||
// If !goexperiment.AllocHeaders, "size" doesn't include the
|
||||
// allocation header, so use span.elemsize as the "full" size
|
||||
// for various computations below.
|
||||
//
|
||||
// TODO(mknyszek): We should really count the header as part
|
||||
// of gc_sys or something, but it's risky to change the
|
||||
// accounting so much right now. Just pretend it's internal
|
||||
// fragmentation and match the GC's accounting by using the
|
||||
// whole allocation slot.
|
||||
fullSize := size
|
||||
if goexperiment.AllocHeaders {
|
||||
fullSize = span.elemsize
|
||||
}
|
||||
if rate := MemProfileRate; rate > 0 {
|
||||
// Note cache c only valid while m acquired; see #47302
|
||||
if rate != 1 && size < c.nextSample {
|
||||
c.nextSample -= size
|
||||
//
|
||||
// N.B. Use the full size because that matches how the GC
|
||||
// will update the mem profile on the "free" side.
|
||||
if rate != 1 && fullSize < c.nextSample {
|
||||
c.nextSample -= fullSize
|
||||
} else {
|
||||
profilealloc(mp, x, size)
|
||||
profilealloc(mp, x, fullSize)
|
||||
}
|
||||
}
|
||||
mp.mallocing = 0
|
||||
@ -1268,6 +1279,7 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
||||
if goexperiment.AllocHeaders && header != nil {
|
||||
throw("unexpected malloc header in delayed zeroing of large object")
|
||||
}
|
||||
// N.B. size == fullSize always in this case.
|
||||
memclrNoHeapPointersChunked(size, x) // This is a possible preemption point: see #47302
|
||||
}
|
||||
|
||||
@ -1278,14 +1290,17 @@ func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer {
|
||||
|
||||
if inittrace.active && inittrace.id == getg().goid {
|
||||
// Init functions are executed sequentially in a single goroutine.
|
||||
inittrace.bytes += uint64(size)
|
||||
inittrace.bytes += uint64(fullSize)
|
||||
}
|
||||
}
|
||||
|
||||
if assistG != nil {
|
||||
// Account for internal fragmentation in the assist
|
||||
// debt now that we know it.
|
||||
assistG.gcAssistBytes -= int64(size - dataSize)
|
||||
//
|
||||
// N.B. Use the full size because that's how the rest
|
||||
// of the GC accounts for bytes marked.
|
||||
assistG.gcAssistBytes -= int64(fullSize - dataSize)
|
||||
}
|
||||
|
||||
if shouldhelpgc {
|
||||
|
@ -1718,7 +1718,7 @@ func gcDumpObject(label string, obj, off uintptr) {
|
||||
//
|
||||
//go:nowritebarrier
|
||||
//go:nosplit
|
||||
func gcmarknewobject(span *mspan, obj, size uintptr) {
|
||||
func gcmarknewobject(span *mspan, obj uintptr) {
|
||||
if useCheckmark { // The world should be stopped so this should not happen.
|
||||
throw("gcmarknewobject called while doing checkmark")
|
||||
}
|
||||
@ -1734,7 +1734,7 @@ func gcmarknewobject(span *mspan, obj, size uintptr) {
|
||||
}
|
||||
|
||||
gcw := &getg().m.p.ptr().gcw
|
||||
gcw.bytesMarked += uint64(size)
|
||||
gcw.bytesMarked += uint64(span.elemsize)
|
||||
}
|
||||
|
||||
// gcMarkTinyAllocs greys all active tiny alloc blocks.
|
||||
|
Loading…
Reference in New Issue
Block a user