mirror of
https://github.com/golang/go
synced 2024-11-18 00:54:45 -07:00
runtime: fix sweep termination condition
Currently, there is a chance that the sweep termination condition could flap, causing e.g. runtime.GC to return before all sweep work has not only been drained, but also completed. CL 307915 and CL 307916 attempted to fix this problem, but it is still possible that mheap_.sweepDrained is marked before any outstanding sweepers are accounted for in mheap_.sweepers, leaving a window in which a thread could observe isSweepDone as true before it actually was (and after some time it would revert to false, then true again, depending on the number of outstanding sweepers at that point). This change fixes the sweep termination condition by merging mheap_.sweepers and mheap_.sweepDrained into a single atomic value. This value is updated such that a new potential sweeper will increment the outstanding sweeper count iff there are still outstanding spans to be swept without an outstanding sweeper to pick them up. This design simplifies the sweep termination condition into a single atomic load and comparison and ensures the condition never flaps. Updates #46500. Fixes #45315. Change-Id: I6d69aff156b8d48428c4cc8cfdbf28be346dbf04 Reviewed-on: https://go-review.googlesource.com/c/go/+/333389 Trust: Michael Knyszek <mknyszek@google.com> Run-TryBot: Michael Knyszek <mknyszek@google.com> TryBot-Result: Go Bot <gobot@golang.org> Reviewed-by: Austin Clements <austin@google.com>
This commit is contained in:
parent
f288526374
commit
3aecb3a8f7
@ -102,14 +102,16 @@ func (c *mcentral) cacheSpan() *mspan {
|
|||||||
spanBudget := 100
|
spanBudget := 100
|
||||||
|
|
||||||
var s *mspan
|
var s *mspan
|
||||||
sl := newSweepLocker()
|
var sl sweepLocker
|
||||||
sg := sl.sweepGen
|
|
||||||
|
|
||||||
// Try partial swept spans first.
|
// Try partial swept spans first.
|
||||||
|
sg := mheap_.sweepgen
|
||||||
if s = c.partialSwept(sg).pop(); s != nil {
|
if s = c.partialSwept(sg).pop(); s != nil {
|
||||||
goto havespan
|
goto havespan
|
||||||
}
|
}
|
||||||
|
|
||||||
|
sl = sweep.active.begin()
|
||||||
|
if sl.valid {
|
||||||
// Now try partial unswept spans.
|
// Now try partial unswept spans.
|
||||||
for ; spanBudget >= 0; spanBudget-- {
|
for ; spanBudget >= 0; spanBudget-- {
|
||||||
s = c.partialUnswept(sg).pop()
|
s = c.partialUnswept(sg).pop()
|
||||||
@ -119,7 +121,7 @@ func (c *mcentral) cacheSpan() *mspan {
|
|||||||
if s, ok := sl.tryAcquire(s); ok {
|
if s, ok := sl.tryAcquire(s); ok {
|
||||||
// We got ownership of the span, so let's sweep it and use it.
|
// We got ownership of the span, so let's sweep it and use it.
|
||||||
s.sweep(true)
|
s.sweep(true)
|
||||||
sl.dispose()
|
sweep.active.end(sl)
|
||||||
goto havespan
|
goto havespan
|
||||||
}
|
}
|
||||||
// We failed to get ownership of the span, which means it's being or
|
// We failed to get ownership of the span, which means it's being or
|
||||||
@ -143,7 +145,7 @@ func (c *mcentral) cacheSpan() *mspan {
|
|||||||
freeIndex := s.nextFreeIndex()
|
freeIndex := s.nextFreeIndex()
|
||||||
if freeIndex != s.nelems {
|
if freeIndex != s.nelems {
|
||||||
s.freeindex = freeIndex
|
s.freeindex = freeIndex
|
||||||
sl.dispose()
|
sweep.active.end(sl)
|
||||||
goto havespan
|
goto havespan
|
||||||
}
|
}
|
||||||
// Add it to the swept list, because sweeping didn't give us any free space.
|
// Add it to the swept list, because sweeping didn't give us any free space.
|
||||||
@ -151,11 +153,12 @@ func (c *mcentral) cacheSpan() *mspan {
|
|||||||
}
|
}
|
||||||
// See comment for partial unswept spans.
|
// See comment for partial unswept spans.
|
||||||
}
|
}
|
||||||
sl.dispose()
|
sweep.active.end(sl)
|
||||||
if trace.enabled {
|
if trace.enabled {
|
||||||
traceGCSweepDone()
|
traceGCSweepDone()
|
||||||
traceDone = true
|
traceDone = true
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// We failed to get a span from the mcentral so get one from mheap.
|
// We failed to get a span from the mcentral so get one from mheap.
|
||||||
s = c.grow()
|
s = c.grow()
|
||||||
|
@ -154,7 +154,7 @@ func gcinit() {
|
|||||||
throw("size of Workbuf is suboptimal")
|
throw("size of Workbuf is suboptimal")
|
||||||
}
|
}
|
||||||
// No sweep on the first cycle.
|
// No sweep on the first cycle.
|
||||||
mheap_.sweepDrained = 1
|
sweep.active.state.Store(sweepDrainedMask)
|
||||||
|
|
||||||
// Initialize GC pacer state.
|
// Initialize GC pacer state.
|
||||||
// Use the environment variable GOGC for the initial gcPercent value.
|
// Use the environment variable GOGC for the initial gcPercent value.
|
||||||
@ -1022,8 +1022,10 @@ func gcMarkTermination(nextTriggerRatio float64) {
|
|||||||
// Those aren't tracked in any sweep lists, so we need to
|
// Those aren't tracked in any sweep lists, so we need to
|
||||||
// count them against sweep completion until we ensure all
|
// count them against sweep completion until we ensure all
|
||||||
// those spans have been forced out.
|
// those spans have been forced out.
|
||||||
sl := newSweepLocker()
|
sl := sweep.active.begin()
|
||||||
sl.blockCompletion()
|
if !sl.valid {
|
||||||
|
throw("failed to set sweep barrier")
|
||||||
|
}
|
||||||
|
|
||||||
systemstack(func() { startTheWorldWithSema(true) })
|
systemstack(func() { startTheWorldWithSema(true) })
|
||||||
|
|
||||||
@ -1050,7 +1052,7 @@ func gcMarkTermination(nextTriggerRatio float64) {
|
|||||||
})
|
})
|
||||||
// Now that we've swept stale spans in mcaches, they don't
|
// Now that we've swept stale spans in mcaches, they don't
|
||||||
// count against unswept spans.
|
// count against unswept spans.
|
||||||
sl.dispose()
|
sweep.active.end(sl)
|
||||||
|
|
||||||
// Print gctrace before dropping worldsema. As soon as we drop
|
// Print gctrace before dropping worldsema. As soon as we drop
|
||||||
// worldsema another cycle could start and smash the stats
|
// worldsema another cycle could start and smash the stats
|
||||||
@ -1457,7 +1459,7 @@ func gcSweep(mode gcMode) {
|
|||||||
|
|
||||||
lock(&mheap_.lock)
|
lock(&mheap_.lock)
|
||||||
mheap_.sweepgen += 2
|
mheap_.sweepgen += 2
|
||||||
mheap_.sweepDrained = 0
|
sweep.active.reset()
|
||||||
mheap_.pagesSwept.Store(0)
|
mheap_.pagesSwept.Store(0)
|
||||||
mheap_.sweepArenas = mheap_.allArenas
|
mheap_.sweepArenas = mheap_.allArenas
|
||||||
mheap_.reclaimIndex.Store(0)
|
mheap_.reclaimIndex.Store(0)
|
||||||
|
@ -41,6 +41,10 @@ type sweepdata struct {
|
|||||||
nbgsweep uint32
|
nbgsweep uint32
|
||||||
npausesweep uint32
|
npausesweep uint32
|
||||||
|
|
||||||
|
// active tracks outstanding sweepers and the sweep
|
||||||
|
// termination condition.
|
||||||
|
active activeSweep
|
||||||
|
|
||||||
// centralIndex is the current unswept span class.
|
// centralIndex is the current unswept span class.
|
||||||
// It represents an index into the mcentral span
|
// It represents an index into the mcentral span
|
||||||
// sets. Accessed and updated via its load and
|
// sets. Accessed and updated via its load and
|
||||||
@ -116,6 +120,108 @@ func (h *mheap) nextSpanForSweep() *mspan {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
const sweepDrainedMask = 1 << 31
|
||||||
|
|
||||||
|
// activeSweep is a type that captures whether sweeping
|
||||||
|
// is done, and whether there are any outstanding sweepers.
|
||||||
|
//
|
||||||
|
// Every potential sweeper must call begin() before they look
|
||||||
|
// for work, and end() after they've finished sweeping.
|
||||||
|
type activeSweep struct {
|
||||||
|
// state is divided into two parts.
|
||||||
|
//
|
||||||
|
// The top bit (masked by sweepDrainedMask) is a boolean
|
||||||
|
// value indicating whether all the sweep work has been
|
||||||
|
// drained from the queue.
|
||||||
|
//
|
||||||
|
// The rest of the bits are a counter, indicating the
|
||||||
|
// number of outstanding concurrent sweepers.
|
||||||
|
state atomic.Uint32
|
||||||
|
}
|
||||||
|
|
||||||
|
// begin registers a new sweeper. Returns a sweepLocker
|
||||||
|
// for acquiring spans for sweeping. Any outstanding sweeper blocks
|
||||||
|
// sweep termination.
|
||||||
|
//
|
||||||
|
// If the sweepLocker is invalid, the caller can be sure that all
|
||||||
|
// outstanding sweep work has been drained, so there is nothing left
|
||||||
|
// to sweep. Note that there may be sweepers currently running, so
|
||||||
|
// this does not indicate that all sweeping has completed.
|
||||||
|
//
|
||||||
|
// Even if the sweepLocker is invalid, its sweepGen is always valid.
|
||||||
|
func (a *activeSweep) begin() sweepLocker {
|
||||||
|
for {
|
||||||
|
state := a.state.Load()
|
||||||
|
if state&sweepDrainedMask != 0 {
|
||||||
|
return sweepLocker{mheap_.sweepgen, false}
|
||||||
|
}
|
||||||
|
if a.state.CompareAndSwap(state, state+1) {
|
||||||
|
return sweepLocker{mheap_.sweepgen, true}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// end deregisters a sweeper. Must be called once for each time
|
||||||
|
// begin is called if the sweepLocker is valid.
|
||||||
|
func (a *activeSweep) end(sl sweepLocker) {
|
||||||
|
if sl.sweepGen != mheap_.sweepgen {
|
||||||
|
throw("sweeper left outstanding across sweep generations")
|
||||||
|
}
|
||||||
|
for {
|
||||||
|
state := a.state.Load()
|
||||||
|
if (state&^sweepDrainedMask)-1 >= sweepDrainedMask {
|
||||||
|
throw("mismatched begin/end of activeSweep")
|
||||||
|
}
|
||||||
|
if a.state.CompareAndSwap(state, state-1) {
|
||||||
|
if state != sweepDrainedMask {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if debug.gcpacertrace > 0 {
|
||||||
|
print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// markDrained marks the active sweep cycle as having drained
|
||||||
|
// all remaining work. This is safe to be called concurrently
|
||||||
|
// with all other methods of activeSweep, though may race.
|
||||||
|
//
|
||||||
|
// Returns true if this call was the one that actually performed
|
||||||
|
// the mark.
|
||||||
|
func (a *activeSweep) markDrained() bool {
|
||||||
|
for {
|
||||||
|
state := a.state.Load()
|
||||||
|
if state&sweepDrainedMask != 0 {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if a.state.CompareAndSwap(state, state|sweepDrainedMask) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sweepers returns the current number of active sweepers.
|
||||||
|
func (a *activeSweep) sweepers() uint32 {
|
||||||
|
return a.state.Load() &^ sweepDrainedMask
|
||||||
|
}
|
||||||
|
|
||||||
|
// isDone returns true if all sweep work has been drained and no more
|
||||||
|
// outstanding sweepers exist. That is, when the sweep phase is
|
||||||
|
// completely done.
|
||||||
|
func (a *activeSweep) isDone() bool {
|
||||||
|
return a.state.Load() == sweepDrainedMask
|
||||||
|
}
|
||||||
|
|
||||||
|
// reset sets up the activeSweep for the next sweep cycle.
|
||||||
|
//
|
||||||
|
// The world must be stopped.
|
||||||
|
func (a *activeSweep) reset() {
|
||||||
|
assertWorldStopped()
|
||||||
|
a.state.Store(0)
|
||||||
|
}
|
||||||
|
|
||||||
// finishsweep_m ensures that all spans are swept.
|
// finishsweep_m ensures that all spans are swept.
|
||||||
//
|
//
|
||||||
// The world must be stopped. This ensures there are no sweeps in
|
// The world must be stopped. This ensures there are no sweeps in
|
||||||
@ -134,6 +240,15 @@ func finishsweep_m() {
|
|||||||
sweep.npausesweep++
|
sweep.npausesweep++
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Make sure there aren't any outstanding sweepers left.
|
||||||
|
// At this point, with the world stopped, it means one of two
|
||||||
|
// things. Either we were able to preempt a sweeper, or that
|
||||||
|
// a sweeper didn't call sweep.active.end when it should have.
|
||||||
|
// Both cases indicate a bug, so throw.
|
||||||
|
if sweep.active.sweepers() != 0 {
|
||||||
|
throw("active sweepers found at start of mark phase")
|
||||||
|
}
|
||||||
|
|
||||||
// Reset all the unswept buffers, which should be empty.
|
// Reset all the unswept buffers, which should be empty.
|
||||||
// Do this in sweep termination as opposed to mark termination
|
// Do this in sweep termination as opposed to mark termination
|
||||||
// so that we can catch unswept spans and reclaim blocks as
|
// so that we can catch unswept spans and reclaim blocks as
|
||||||
@ -183,15 +298,11 @@ func bgsweep(c chan int) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// sweepLocker acquires sweep ownership of spans and blocks sweep
|
// sweepLocker acquires sweep ownership of spans.
|
||||||
// completion.
|
|
||||||
type sweepLocker struct {
|
type sweepLocker struct {
|
||||||
// sweepGen is the sweep generation of the heap.
|
// sweepGen is the sweep generation of the heap.
|
||||||
sweepGen uint32
|
sweepGen uint32
|
||||||
// blocking indicates that this tracker is blocking sweep
|
valid bool
|
||||||
// completion, usually as a result of acquiring sweep
|
|
||||||
// ownership of at least one span.
|
|
||||||
blocking bool
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// sweepLocked represents sweep ownership of a span.
|
// sweepLocked represents sweep ownership of a span.
|
||||||
@ -199,22 +310,16 @@ type sweepLocked struct {
|
|||||||
*mspan
|
*mspan
|
||||||
}
|
}
|
||||||
|
|
||||||
func newSweepLocker() sweepLocker {
|
|
||||||
return sweepLocker{
|
|
||||||
sweepGen: mheap_.sweepgen,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// tryAcquire attempts to acquire sweep ownership of span s. If it
|
// tryAcquire attempts to acquire sweep ownership of span s. If it
|
||||||
// successfully acquires ownership, it blocks sweep completion.
|
// successfully acquires ownership, it blocks sweep completion.
|
||||||
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
|
func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
|
||||||
|
if !l.valid {
|
||||||
|
throw("use of invalid sweepLocker")
|
||||||
|
}
|
||||||
// Check before attempting to CAS.
|
// Check before attempting to CAS.
|
||||||
if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
|
if atomic.Load(&s.sweepgen) != l.sweepGen-2 {
|
||||||
return sweepLocked{}, false
|
return sweepLocked{}, false
|
||||||
}
|
}
|
||||||
// Add ourselves to sweepers before potentially taking
|
|
||||||
// ownership.
|
|
||||||
l.blockCompletion()
|
|
||||||
// Attempt to acquire sweep ownership of s.
|
// Attempt to acquire sweep ownership of s.
|
||||||
if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
|
if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) {
|
||||||
return sweepLocked{}, false
|
return sweepLocked{}, false
|
||||||
@ -222,48 +327,22 @@ func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) {
|
|||||||
return sweepLocked{s}, true
|
return sweepLocked{s}, true
|
||||||
}
|
}
|
||||||
|
|
||||||
// blockCompletion blocks sweep completion without acquiring any
|
|
||||||
// specific spans.
|
|
||||||
func (l *sweepLocker) blockCompletion() {
|
|
||||||
if !l.blocking {
|
|
||||||
atomic.Xadd(&mheap_.sweepers, +1)
|
|
||||||
l.blocking = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *sweepLocker) dispose() {
|
|
||||||
if !l.blocking {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Decrement the number of active sweepers and if this is the
|
|
||||||
// last one, mark sweep as complete.
|
|
||||||
l.blocking = false
|
|
||||||
if atomic.Xadd(&mheap_.sweepers, -1) == 0 && atomic.Load(&mheap_.sweepDrained) != 0 {
|
|
||||||
l.sweepIsDone()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *sweepLocker) sweepIsDone() {
|
|
||||||
if debug.gcpacertrace > 0 {
|
|
||||||
print("pacer: sweep done at heap size ", gcController.heapLive>>20, "MB; allocated ", (gcController.heapLive-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// sweepone sweeps some unswept heap span and returns the number of pages returned
|
// sweepone sweeps some unswept heap span and returns the number of pages returned
|
||||||
// to the heap, or ^uintptr(0) if there was nothing to sweep.
|
// to the heap, or ^uintptr(0) if there was nothing to sweep.
|
||||||
func sweepone() uintptr {
|
func sweepone() uintptr {
|
||||||
_g_ := getg()
|
gp := getg()
|
||||||
|
|
||||||
// increment locks to ensure that the goroutine is not preempted
|
// Increment locks to ensure that the goroutine is not preempted
|
||||||
// in the middle of sweep thus leaving the span in an inconsistent state for next GC
|
// in the middle of sweep thus leaving the span in an inconsistent state for next GC
|
||||||
_g_.m.locks++
|
gp.m.locks++
|
||||||
if atomic.Load(&mheap_.sweepDrained) != 0 {
|
|
||||||
_g_.m.locks--
|
|
||||||
return ^uintptr(0)
|
|
||||||
}
|
|
||||||
// TODO(austin): sweepone is almost always called in a loop;
|
// TODO(austin): sweepone is almost always called in a loop;
|
||||||
// lift the sweepLocker into its callers.
|
// lift the sweepLocker into its callers.
|
||||||
sl := newSweepLocker()
|
sl := sweep.active.begin()
|
||||||
|
if !sl.valid {
|
||||||
|
gp.m.locks--
|
||||||
|
return ^uintptr(0)
|
||||||
|
}
|
||||||
|
|
||||||
// Find a span to sweep.
|
// Find a span to sweep.
|
||||||
npages := ^uintptr(0)
|
npages := ^uintptr(0)
|
||||||
@ -271,7 +350,7 @@ func sweepone() uintptr {
|
|||||||
for {
|
for {
|
||||||
s := mheap_.nextSpanForSweep()
|
s := mheap_.nextSpanForSweep()
|
||||||
if s == nil {
|
if s == nil {
|
||||||
noMoreWork = atomic.Cas(&mheap_.sweepDrained, 0, 1)
|
noMoreWork = sweep.active.markDrained()
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
if state := s.state.get(); state != mSpanInUse {
|
if state := s.state.get(); state != mSpanInUse {
|
||||||
@ -301,8 +380,7 @@ func sweepone() uintptr {
|
|||||||
break
|
break
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
sweep.active.end(sl)
|
||||||
sl.dispose()
|
|
||||||
|
|
||||||
if noMoreWork {
|
if noMoreWork {
|
||||||
// The sweep list is empty. There may still be
|
// The sweep list is empty. There may still be
|
||||||
@ -331,7 +409,7 @@ func sweepone() uintptr {
|
|||||||
readyForScavenger()
|
readyForScavenger()
|
||||||
}
|
}
|
||||||
|
|
||||||
_g_.m.locks--
|
gp.m.locks--
|
||||||
return npages
|
return npages
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -342,10 +420,7 @@ func sweepone() uintptr {
|
|||||||
// GC runs; to prevent that the caller must be non-preemptible or must
|
// GC runs; to prevent that the caller must be non-preemptible or must
|
||||||
// somehow block GC progress.
|
// somehow block GC progress.
|
||||||
func isSweepDone() bool {
|
func isSweepDone() bool {
|
||||||
// Check that all spans have at least begun sweeping and there
|
return sweep.active.isDone()
|
||||||
// are no active sweepers. If both are true, then all spans
|
|
||||||
// have finished sweeping.
|
|
||||||
return atomic.Load(&mheap_.sweepDrained) != 0 && atomic.Load(&mheap_.sweepers) == 0
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Returns only when span s has been swept.
|
// Returns only when span s has been swept.
|
||||||
@ -359,16 +434,23 @@ func (s *mspan) ensureSwept() {
|
|||||||
throw("mspan.ensureSwept: m is not locked")
|
throw("mspan.ensureSwept: m is not locked")
|
||||||
}
|
}
|
||||||
|
|
||||||
sl := newSweepLocker()
|
// If this operation fails, then that means that there are
|
||||||
|
// no more spans to be swept. In this case, either s has already
|
||||||
|
// been swept, or is about to be acquired for sweeping and swept.
|
||||||
|
sl := sweep.active.begin()
|
||||||
|
if sl.valid {
|
||||||
// The caller must be sure that the span is a mSpanInUse span.
|
// The caller must be sure that the span is a mSpanInUse span.
|
||||||
if s, ok := sl.tryAcquire(s); ok {
|
if s, ok := sl.tryAcquire(s); ok {
|
||||||
s.sweep(false)
|
s.sweep(false)
|
||||||
sl.dispose()
|
sweep.active.end(sl)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
sl.dispose()
|
sweep.active.end(sl)
|
||||||
|
}
|
||||||
|
|
||||||
// unfortunate condition, and we don't have efficient means to wait
|
// Unfortunately we can't sweep the span ourselves. Somebody else
|
||||||
|
// got to it first. We don't have efficient means to wait, but that's
|
||||||
|
// OK, it will be swept fairly soon.
|
||||||
for {
|
for {
|
||||||
spangen := atomic.Load(&s.sweepgen)
|
spangen := atomic.Load(&s.sweepgen)
|
||||||
if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
|
if spangen == sl.sweepGen || spangen == sl.sweepGen+3 {
|
||||||
|
@ -66,8 +66,6 @@ type mheap struct {
|
|||||||
pages pageAlloc // page allocation data structure
|
pages pageAlloc // page allocation data structure
|
||||||
|
|
||||||
sweepgen uint32 // sweep generation, see comment in mspan; written during STW
|
sweepgen uint32 // sweep generation, see comment in mspan; written during STW
|
||||||
sweepDrained uint32 // all spans are swept or are being swept
|
|
||||||
sweepers uint32 // number of active sweepone calls
|
|
||||||
|
|
||||||
// allspans is a slice of all mspans ever created. Each mspan
|
// allspans is a slice of all mspans ever created. Each mspan
|
||||||
// appears exactly once.
|
// appears exactly once.
|
||||||
@ -815,7 +813,10 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
|
|||||||
|
|
||||||
n0 := n
|
n0 := n
|
||||||
var nFreed uintptr
|
var nFreed uintptr
|
||||||
sl := newSweepLocker()
|
sl := sweep.active.begin()
|
||||||
|
if !sl.valid {
|
||||||
|
return 0
|
||||||
|
}
|
||||||
for n > 0 {
|
for n > 0 {
|
||||||
ai := arenas[pageIdx/pagesPerArena]
|
ai := arenas[pageIdx/pagesPerArena]
|
||||||
ha := h.arenas[ai.l1()][ai.l2()]
|
ha := h.arenas[ai.l1()][ai.l2()]
|
||||||
@ -861,7 +862,7 @@ func (h *mheap) reclaimChunk(arenas []arenaIdx, pageIdx, n uintptr) uintptr {
|
|||||||
pageIdx += uintptr(len(inUse) * 8)
|
pageIdx += uintptr(len(inUse) * 8)
|
||||||
n -= uintptr(len(inUse) * 8)
|
n -= uintptr(len(inUse) * 8)
|
||||||
}
|
}
|
||||||
sl.dispose()
|
sweep.active.end(sl)
|
||||||
if trace.enabled {
|
if trace.enabled {
|
||||||
unlock(&h.lock)
|
unlock(&h.lock)
|
||||||
// Account for pages scanned but not reclaimed.
|
// Account for pages scanned but not reclaimed.
|
||||||
|
@ -85,17 +85,6 @@ func TestMemoryProfiler(t *testing.T) {
|
|||||||
|
|
||||||
runtime.GC() // materialize stats
|
runtime.GC() // materialize stats
|
||||||
|
|
||||||
// TODO(mknyszek): Fix #45315 and remove this extra call.
|
|
||||||
//
|
|
||||||
// Unfortunately, it's possible for the sweep termination condition
|
|
||||||
// to flap, so with just one runtime.GC call, a freed object could be
|
|
||||||
// missed, leading this test to fail. A second call reduces the chance
|
|
||||||
// of this happening to zero, because sweeping actually has to finish
|
|
||||||
// to move on to the next GC, during which nothing will happen.
|
|
||||||
//
|
|
||||||
// See #46500 for more details.
|
|
||||||
runtime.GC()
|
|
||||||
|
|
||||||
memoryProfilerRun++
|
memoryProfilerRun++
|
||||||
|
|
||||||
tests := []struct {
|
tests := []struct {
|
||||||
|
Loading…
Reference in New Issue
Block a user