
runtime: consolidate "is sweep done" conditions

The runtime currently has two different notions of sweep completion:

1. All spans are either swept or have begun sweeping.

2. The sweeper has *finished* sweeping all spans.

Having both is confusing (it doesn't help that the documentation is
often unclear or wrong). Condition 2 is stronger, and the slight
theoretical optimization that condition 1 could allow never actually
matters in practice. Hence, this CL consolidates both conditions down
to condition 2.
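
As a rough sketch of the distinction (illustrative stand-in names only; the real state lives in mheap_.sweepDrained and mheap_.sweepers, as the diff below shows):

package main

import (
	"fmt"
	"sync/atomic"
)

// sweepState is a toy stand-in for the runtime's bookkeeping, not its real type.
type sweepState struct {
	drained  uint32 // set to 1 once every span has been handed to a sweeper
	sweepers uint32 // number of sweeps still in progress
}

// Condition 1: all spans are swept or have begun sweeping.
func (s *sweepState) drainedOnly() bool {
	return atomic.LoadUint32(&s.drained) != 0
}

// Condition 2: the sweeper has finished sweeping all spans.
func (s *sweepState) finished() bool {
	return atomic.LoadUint32(&s.drained) != 0 && atomic.LoadUint32(&s.sweepers) == 0
}

func main() {
	s := &sweepState{drained: 1, sweepers: 1}  // queue empty, one sweep still running
	fmt.Println(s.drainedOnly(), s.finished()) // true false: drained, but not finished
}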

Updates #45315.

Change-Id: I55c84d767d74eb31a004a5619eaba2e351162332
Reviewed-on: https://go-review.googlesource.com/c/go/+/307916
Trust: Austin Clements <austin@google.com>
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Go Bot <gobot@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Austin Clements, 2021-04-06 19:25:28 -04:00
commit 1b736b3c19 (parent a25a77aed2)
3 changed files with 18 additions and 14 deletions

src/runtime/mgc.go

@@ -175,7 +175,7 @@ func gcinit() {
 	}
 
 	// No sweep on the first cycle.
-	mheap_.sweepdone = 1
+	mheap_.sweepDrained = 1
 
 	// Set a reasonable initial GC trigger.
 	memstats.triggerRatio = 7 / 8.0
@@ -1187,7 +1187,7 @@ func GC() {
 	// First, wait for sweeping to finish. (We know there are no
 	// more spans on the sweep queue, but we may be concurrently
 	// sweeping spans, so we have to wait.)
-	for atomic.Load(&work.cycles) == n+1 && atomic.Load(&mheap_.sweepers) != 0 {
+	for atomic.Load(&work.cycles) == n+1 && !isSweepDone() {
 		Gosched()
 	}
@@ -2192,7 +2192,7 @@ func gcSweep(mode gcMode) {
 
 	lock(&mheap_.lock)
 	mheap_.sweepgen += 2
-	mheap_.sweepdone = 0
+	mheap_.sweepDrained = 0
 	mheap_.pagesSwept = 0
 	mheap_.sweepArenas = mheap_.allArenas
 	mheap_.reclaimIndex = 0
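
The GC() change above swaps a raw sweeper-count check for isSweepDone(). The waiting pattern itself is just a yield loop; a standalone sketch of that shape (waitUntil and the done flag are illustrative names, not runtime APIs):

package main

import (
	"runtime"
	"sync/atomic"
)

// waitUntil spins until *done becomes nonzero, yielding the processor
// between checks -- the same shape as the
// "for ... && !isSweepDone() { Gosched() }" loop in the diff above.
func waitUntil(done *uint32) {
	for atomic.LoadUint32(done) == 0 {
		runtime.Gosched()
	}
}

func main() {
	var done uint32
	go func() { atomic.StoreUint32(&done, 1) }() // background work finishing up
	waitUntil(&done)
}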

src/runtime/mgcsweep.go

@@ -238,7 +238,7 @@ func (l *sweepLocker) dispose() {
 	// Decrement the number of active sweepers and if this is the
 	// last one, mark sweep as complete.
 	l.blocking = false
-	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepdone) != 0 {
+	if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepDrained) != 0 {
 		l.sweepIsDone()
 	}
 }
@@ -257,7 +257,7 @@ func sweepone() uintptr {
 	// increment locks to ensure that the goroutine is not preempted
 	// in the middle of sweep thus leaving the span in an inconsistent state for next GC
 	_g_.m.locks++
-	if atomic.Load(&mheap_.sweepdone) != 0 {
+	if atomic.Load(&mheap_.sweepDrained) != 0 {
 		_g_.m.locks--
 		return ^uintptr(0)
 	}
@@ -271,7 +271,7 @@ func sweepone() uintptr {
 	for {
 		s := mheap_.nextSpanForSweep()
 		if s == nil {
-			noMoreWork = atomic.Cas(&mheap_.sweepdone, 0, 1)
+			noMoreWork = atomic.Cas(&mheap_.sweepDrained, 0, 1)
 			break
 		}
 		if state := s.state.get(); state != mSpanInUse {
@@ -335,14 +335,17 @@ func sweepone() uintptr {
 	return npages
 }
 
-// isSweepDone reports whether all spans are swept or currently being swept.
+// isSweepDone reports whether all spans are swept.
 //
 // Note that this condition may transition from false to true at any
 // time as the sweeper runs. It may transition from true to false if a
 // GC runs; to prevent that the caller must be non-preemptible or must
 // somehow block GC progress.
 func isSweepDone() bool {
-	return mheap_.sweepdone != 0
+	// Check that all spans have at least begun sweeping and there
+	// are no active sweepers. If both are true, then all spans
+	// have finished sweeping.
+	return atomic.Load(&mheap_.sweepDrained) != 0 && atomic.Load(&mheap_.sweepers) == 0
 }
 
 // Returns only when span s has been swept.
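
Taken together, the mgcsweep.go changes implement a common completion-detection pattern: a drained flag for the work queue plus a count of in-flight workers, where "done" means both. A self-contained sketch of that pattern (pool, next, dispose are illustrative names, not the runtime's API):

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type pool struct {
	mu      sync.Mutex
	queue   []int  // pending work items (spans, in the runtime's case)
	drained uint32 // set once the queue has been emptied
	workers int32  // workers currently processing an item
}

// next hands out one item, registering the caller as an active worker.
// It reports false once the queue is drained.
func (p *pool) next() (int, bool) {
	atomic.AddInt32(&p.workers, 1)
	p.mu.Lock()
	if len(p.queue) == 0 {
		p.mu.Unlock()
		atomic.StoreUint32(&p.drained, 1)
		atomic.AddInt32(&p.workers, -1)
		return 0, false
	}
	item := p.queue[0]
	p.queue = p.queue[1:]
	p.mu.Unlock()
	return item, true
}

// dispose unregisters a worker once it has finished its item.
func (p *pool) dispose() {
	atomic.AddInt32(&p.workers, -1)
}

// done mirrors the new isSweepDone: the queue being drained is not enough,
// there must also be no workers still finishing their last item.
func (p *pool) done() bool {
	return atomic.LoadUint32(&p.drained) != 0 && atomic.LoadInt32(&p.workers) == 0
}

func main() {
	p := &pool{queue: []int{1, 2, 3}}
	for {
		item, ok := p.next()
		if !ok {
			break
		}
		_ = item // "sweep" the item
		p.dispose()
	}
	fmt.Println(p.done()) // true: drained and no active workers
}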

src/runtime/mheap.go

@@ -62,11 +62,12 @@ const (
 type mheap struct {
 	// lock must only be acquired on the system stack, otherwise a g
 	// could self-deadlock if its stack grows with the lock held.
-	lock      mutex
-	pages     pageAlloc // page allocation data structure
-	sweepgen  uint32    // sweep generation, see comment in mspan; written during STW
-	sweepdone uint32    // all spans are swept
-	sweepers  uint32    // number of active sweepone calls
+	lock  mutex
+	pages pageAlloc // page allocation data structure
+
+	sweepgen     uint32 // sweep generation, see comment in mspan; written during STW
+	sweepDrained uint32 // all spans are swept or are being swept
+	sweepers     uint32 // number of active sweepone calls
 
 	// allspans is a slice of all mspans ever created. Each mspan
 	// appears exactly once.
@@ -904,7 +905,7 @@ func (h *mheap) alloc(npages uintptr, spanclass spanClass, needzero bool) *mspan
 	systemstack(func() {
 		// To prevent excessive heap growth, before allocating n pages
 		// we need to sweep and reclaim at least n pages.
-		if h.sweepdone == 0 {
+		if !isSweepDone() {
			h.reclaim(npages)
 		}
 		s = h.allocSpan(npages, spanAllocHeap, spanclass)
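
The mheap.alloc change above gates reclaim work on the stronger condition: only pay the reclaim cost while a sweep is still outstanding. A toy model of that gate (the heap type and its fields here are made up for illustration, not mheap's API):

package main

import "fmt"

// heap is a toy stand-in, not the runtime's mheap.
type heap struct {
	sweepFinished  bool
	pagesReclaimed int
}

// alloc reclaims at least npages before growing the heap, but skips that
// work entirely once the sweep for this cycle has completely finished.
func (h *heap) alloc(npages int) {
	if !h.sweepFinished {
		h.pagesReclaimed += npages // stand-in for h.reclaim(npages)
	}
	// ... allocate the span ...
}

func main() {
	h := &heap{}
	h.alloc(4)
	h.sweepFinished = true
	h.alloc(4)
	fmt.Println(h.pagesReclaimed) // 4: only the first alloc had to reclaim
}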