runtime: overestimate the amount of allocated memory in heapLive

CL 377516 made memory metrics truly monotonic, but in doing so it also
changed how heapLive tracks allocated memory, making it monotonic as
well.

The result is that cached spans containing allocated memory aren't
fully accounted for by the GC, causing it to make a worse pacing
assumption (the exact mechanism is currently unknown) and producing a
memory regression, especially for smaller heaps.

This change partially reverts CL 377516, making heapLive a
non-monotonic overestimate again; this appears to resolve the
regression.

For #53738.

Change-Id: I5c51067abc0b8e0a6b89dd8dbd4a0be2e8c0c1b2
Reviewed-on: https://go-review.googlesource.com/c/go/+/416417
Reviewed-by: Michael Pratt <mpratt@google.com>
Run-TryBot: Michael Knyszek <mknyszek@google.com>
TryBot-Result: Gopher Robot <gobot@golang.org>
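
To make the accounting concrete, here is a minimal standalone sketch of
the arithmetic described above. The sizes and counts are hypothetical,
and spanBytes, elemSize, and nelems merely stand in for the runtime's
s.npages*pageSize, s.elemsize, and s.nelems; it only demonstrates that
the refill-time overestimate and the releaseAll fix-up net out to the
bytes actually allocated while the span was cached.

package main

import "fmt"

func main() {
	// Hypothetical span: one 8 KiB page holding eight 1 KiB slots.
	const (
		spanBytes = int64(8192) // stands in for s.npages * pageSize
		elemSize  = int64(1024) // stands in for s.elemsize
		nelems    = int64(8)    // stands in for s.nelems
	)

	// Three slots are already in use when refill caches the span.
	allocAtCache := int64(3)
	// refill charges all remaining capacity to heapLive up front:
	// an overestimate, since those slots may never be used.
	chargeAtCache := spanBytes - allocAtCache*elemSize // 5120

	// By the time releaseAll uncaches the span, five slots are in use.
	allocAtUncache := int64(5)
	// releaseAll gives back the slots that were never allocated.
	fixup := (nelems - allocAtUncache) * elemSize // 3072

	// The net heapLive delta equals the bytes actually allocated
	// while the span was cached: 5120 - 3072 = 2048 = 2 * 1024.
	fmt.Println(chargeAtCache - fixup)                      // 2048
	fmt.Println((allocAtUncache - allocAtCache) * elemSize) // 2048
}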
Author:    Michael Anthony Knyszek  2022-07-07 20:01:21 +00:00
Committer: Michael Knyszek
parent c177d9d98a
commit 1ebc983000

src/runtime/mcache.go

@@ -173,10 +173,6 @@ func (c *mcache) refill(spc spanClass) {
 		bytesAllocated := slotsUsed * int64(s.elemsize)
 		gcController.totalAlloc.Add(bytesAllocated)
 
-		// Update heapLive and flush scanAlloc.
-		gcController.update(bytesAllocated, int64(c.scanAlloc))
-		c.scanAlloc = 0
-
 		// Clear the second allocCount just to be safe.
 		s.allocCountBeforeCache = 0
 	}
@@ -198,6 +194,23 @@ func (c *mcache) refill(spc spanClass) {
 	// Store the current alloc count for accounting later.
 	s.allocCountBeforeCache = s.allocCount
 
+	// Update heapLive and flush scanAlloc.
+	//
+	// We have not yet allocated anything new into the span, but we
+	// assume that all of its slots will get used, so this makes
+	// heapLive an overestimate.
+	//
+	// When the span gets uncached, we'll fix up this overestimate
+	// if necessary (see releaseAll).
+	//
+	// We pick an overestimate here because an underestimate leads
+	// the pacer to believe that it's in better shape than it is,
+	// which appears to lead to more memory used. See #53738 for
+	// more details.
+	usedBytes := uintptr(s.allocCount) * s.elemsize
+	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc))
+	c.scanAlloc = 0
+
 	c.alloc[spc] = s
 }
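
For context, gcController.update here folds a heap-live delta and the
cache's scan-allocation count into the pacer's state. Below is a
much-simplified model of that bookkeeping; its shape is an assumption
for illustration, not the runtime's actual implementation (the real
function also participates in pacer revisions).

package pacer

import "sync/atomic"

// pacerState is a hypothetical stand-in for the runtime's
// gcControllerState.
type pacerState struct {
	heapLive atomic.Int64 // bytes assumed allocated and live
	heapScan atomic.Int64 // bytes of scannable heap
}

// update models the shape of gcController.update: atomically fold in a
// (possibly negative) heap-live delta and a heap-scan delta.
func (p *pacerState) update(dHeapLive, dHeapScan int64) {
	if dHeapLive != 0 {
		p.heapLive.Add(dHeapLive)
	}
	if dHeapScan != 0 {
		p.heapScan.Add(dHeapScan)
	}
	// The real runtime may also re-run pacing calculations here.
}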
@@ -247,6 +260,8 @@ func (c *mcache) releaseAll() {
 	scanAlloc := int64(c.scanAlloc)
 	c.scanAlloc = 0
 
+	sg := mheap_.sweepgen
+	dHeapLive := int64(0)
 	for i := range c.alloc {
 		s := c.alloc[i]
 		if s != &emptymspan {
@@ -262,6 +277,15 @@ func (c *mcache) releaseAll() {
 			// We assumed earlier that the full span gets allocated.
 			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize))
 
+			if s.sweepgen != sg+1 {
+				// refill conservatively counted unallocated slots in
+				// gcController.heapLive. Undo this.
+				//
+				// If this span was cached before sweep, then gcController.heapLive was
+				// totally recomputed since caching this span, so we don't do this for stale spans.
+				dHeapLive -= int64(uintptr(s.nelems)-uintptr(s.allocCount)) * int64(s.elemsize)
+			}
 			// Release the span to the mcentral.
 			mheap_.central[i].mcentral.uncacheSpan(s)
 			c.alloc[i] = &emptymspan
@@ -277,8 +301,8 @@ func (c *mcache) releaseAll() {
 	c.tinyAllocs = 0
 	memstats.heapStats.release()
 
-	// Updated heapScan.
-	gcController.update(0, scanAlloc)
+	// Update heapLive and heapScan.
+	gcController.update(dHeapLive, scanAlloc)
 }
 
 // prepareForSweep flushes c if the system has entered a new sweep phase
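
The sweepgen check above is what keeps the fix-up safe across GC
cycles. Here is a hedged sketch of that uncache-time logic in
isolation; cachedSpan is a hypothetical stand-in for mspan with field
names chosen to mirror the diff, and the sweepgen handling is reduced
to just the "stale means sg+1" comparison the diff performs.

package main

import "fmt"

// cachedSpan is a hypothetical stand-in for the mspan fields that the
// releaseAll fix-up reads.
type cachedSpan struct {
	nelems     int64  // total slots in the span
	allocCount int64  // slots in use at uncache time
	elemSize   int64  // bytes per slot
	sweepgen   uint32 // span's sweep generation
}

func main() {
	const sg = uint32(10) // stands in for mheap_.sweepgen

	spans := []cachedSpan{
		// Cached this cycle: refill's overestimate must be undone.
		{nelems: 8, allocCount: 5, elemSize: 1024, sweepgen: sg},
		// Stale (cached before sweep): heapLive was recomputed since,
		// so no fix-up is applied.
		{nelems: 4, allocCount: 1, elemSize: 2048, sweepgen: sg + 1},
	}

	dHeapLive := int64(0)
	for _, s := range spans {
		if s.sweepgen != sg+1 {
			// Give back the slots refill charged that were never used.
			dHeapLive -= (s.nelems - s.allocCount) * s.elemSize
		}
	}
	fmt.Println(dHeapLive) // -3072: only the fresh span is corrected
}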