runtime: enable GC assists ASAP
Currently the GC coordinator enables GC assists at the same time it enables background mark workers, after the concurrent scan phase is done. However, this means a rapidly allocating mutator has the entire scan phase during which to allocate beyond the heap trigger and potentially beyond the heap goal with no back-pressure from assists. This prevents the feedback system that's supposed to keep the heap size under the heap goal from doing its job.

Fix this by enabling mutator assists during the scan phase. This is safe because the write barrier is already enabled and globally acknowledged at this point.

There's still a very small window between when the heap size reaches the heap trigger and when the GC coordinator is able to stop the world, during which the mutator can allocate unabated. This allows *very* rapidly allocating mutators like TestTraceStress to still occasionally exceed the heap goal by a small amount (~20 MB at most for TestTraceStress). However, this seems like a corner case.

Fixes #11677.

Change-Id: I0f80d949ec82341cd31ca1604a626efb7295a819
Reviewed-on: https://go-review.googlesource.com/12674
Reviewed-by: Russ Cox <rsc@golang.org>
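To make the feedback loop described above concrete, here is a minimal, illustrative sketch of the assist idea: once gcBlackenEnabled is nonzero, an allocating goroutine pays a mark-work debt proportional to its allocation before the allocation proceeds. The names assistRatio, markOneUnit, and allocate are hypothetical stand-ins for the runtime's mallocgc assist accounting, not the actual implementation.

	// Illustrative sketch only; the real assist accounting lives in the
	// runtime's allocator and GC controller, not in user code.
	package gcassistsketch

	import "sync/atomic"

	// gcBlackenEnabled mirrors the flag the coordinator sets: nonzero
	// means assists and mark workers may blacken objects.
	var gcBlackenEnabled uint32

	// assistRatio is a made-up constant: units of mark work owed per
	// byte allocated while the GC is in the scan or mark phase.
	const assistRatio = 2

	// allocate models the assist check: if marking is enabled, do enough
	// mark work to cover this allocation before letting it proceed. This
	// is the back-pressure that keeps the heap size near the heap goal.
	func allocate(size int, markOneUnit func()) []byte {
		if atomic.LoadUint32(&gcBlackenEnabled) != 0 {
			for debt := size * assistRatio; debt > 0; debt-- {
				markOneUnit()
			}
		}
		return make([]byte, size)
	}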
parent f5e67e53e7
commit 510fd1350d
@@ -956,6 +956,15 @@ func gc(mode int) {
	// boundaries where there are up-pointers.
	setGCPhase(_GCscan)

	gcBgMarkPrepare() // Must happen before assist enable.

	// At this point all Ps have enabled the write
	// barrier, thus maintaining the no white to
	// black invariant. Enable mutator assists to
	// put back-pressure on fast allocating
	// mutators.
	atomicstore(&gcBlackenEnabled, 1)

	// Concurrent scan.
	startTheWorldWithSema()
	now = nanotime()
@@ -976,17 +985,13 @@ func gc(mode int) {
		forEachP(func(*p) {})
	})
	// Concurrent mark.
	gcBgMarkPrepare() // Must happen before assist enable.
	// At this point all Ps have enabled the mark phase
	// write barrier, thus maintaining the no white to
	// black invariant. Mutator assists and mark workers
	// can now be enabled to safely blacken grey objects.
	atomicstore(&gcBlackenEnabled, 1)
	if debug.gctrace > 0 {
		tMark = nanotime()
	}

	// Wait for background mark completion.
	// Enable background mark workers and wait for
	// background mark completion.
	gcController.bgMarkStartTime = nanotime()
	work.bgMark1.clear()
	work.bgMark1.wait()
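As an aside on the work.bgMark1.clear()/wait() pair above: it behaves like a reusable completion barrier that the coordinator re-arms and then blocks on until background marking signals completion. The following is a rough standard-library analogue for illustration only; the runtime's bgMark1 is not implemented this way, and all names here are hypothetical.

	// completion is a hypothetical stand-in for a clear/complete/wait
	// barrier: clear re-arms it, complete signals it, wait blocks on it.
	package barriersketch

	import "sync"

	type completion struct {
		mu   sync.Mutex
		cond *sync.Cond
		done bool
	}

	func newCompletion() *completion {
		c := &completion{}
		c.cond = sync.NewCond(&c.mu)
		return c
	}

	// clear re-arms the barrier before a new phase begins.
	func (c *completion) clear() {
		c.mu.Lock()
		c.done = false
		c.mu.Unlock()
	}

	// complete is called by whichever worker finishes the phase.
	func (c *completion) complete() {
		c.mu.Lock()
		c.done = true
		c.cond.Broadcast()
		c.mu.Unlock()
	}

	// wait blocks the caller until complete has been called.
	func (c *completion) wait() {
		c.mu.Lock()
		for !c.done {
			c.cond.Wait()
		}
		c.mu.Unlock()
	}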
@@ -1230,7 +1235,6 @@ func gcBgMarkPrepare() {
	// Reset background mark completion points.
	work.bgMark1.done = 1
	work.bgMark2.done = 1
	gcController.bgMarkStartTime = nanotime()
}

func gcBgMarkWorker(p *p) {
@@ -29,9 +29,7 @@ func gcscan_m() {
	// Prepare flag indicating that the scan has not been completed.
	local_allglen := gcResetGState()

	work.nwait = 0
	work.ndone = 0
	work.nproc = 1
	useOneP := uint32(1) // For now do not do this in parallel.
	// ackgcphase is not needed since we are not scanning running goroutines.
	parforsetup(work.markfor, useOneP, uint32(_RootCount+local_allglen), false, markroot)
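The parforsetup call above arranges for markroot to be invoked once per root: the fixed roots (_RootCount) plus one per goroutine (local_allglen), using a single P for now. A rough, hypothetical sketch of that parallel-for-over-roots idea follows; forEachRoot and its parameters are illustrative, not runtime APIs.

	// Hypothetical illustration of a parallel for over root indices.
	package parforsketch

	import "sync"

	// forEachRoot calls markRoot once for each index in [0, rootCount),
	// spreading the work across nworkers goroutines (nworkers must be >= 1).
	func forEachRoot(nworkers, rootCount int, markRoot func(i int)) {
		var wg sync.WaitGroup
		indices := make(chan int)
		for w := 0; w < nworkers; w++ {
			wg.Add(1)
			go func() {
				defer wg.Done()
				for i := range indices {
					markRoot(i)
				}
			}()
		}
		for i := 0; i < rootCount; i++ {
			indices <- i
		}
		close(indices)
		wg.Wait()
	}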