mirror of https://github.com/golang/go synced 2024-11-18 15:54:42 -07:00

runtime: split gc_m into gcMark and gcSweep

This is a nice split but more importantly it provides a better
way to fit the checkmark phase into the sequencing.

Also factor out common span copying into gcCopySpans.

Change-Id: Ia058644974e4ed4ac3cf4b017a3446eb2284d053
Reviewed-on: https://go-review.googlesource.com/5333
Reviewed-by: Austin Clements <austin@google.com>
Russ Cox 2015-02-19 16:43:27 -05:00
parent 929597b9e9
commit 89a091de24
3 changed files with 79 additions and 119 deletions
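
The new sequencing, condensed from the diff below (a paraphrase for orientation, not a compilable excerpt of the runtime; all names are taken from the diff itself): gc() now runs the whole stop-the-world portion in a single systemstack call, with the optional checkmark verification slotted cleanly between the mark and sweep halves.

	// Condensed view of the new control flow on the g0 stack.
	systemstack(func() {
		gcMark(startTime) // STW mark (or, for concurrent GC, mark termination)
		if debug.gccheckmark > 0 {
			// Optional verification pass: re-mark the heap using checkmark
			// bits and throw if the concurrent mark missed anything.
			initCheckmarks()
			gcMark(startTime)
			clearCheckmarks()
		}
		gcSweep(mode) // either eager (blocking) or background sweep
	})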

View File

@@ -120,7 +120,6 @@ import "unsafe"
 const (
 	_DebugGC = 0
-	_DebugGCPtrs = false // if true, print trace of every pointer load during GC
 	_ConcurrentSweep = true
 	_FinBlockSize = 4 * 1024
 	_RootData = 0
@@ -357,41 +356,39 @@ func gc(mode int) {
 	// TODO(rsc): Should the concurrent GC clear pools earlier?
 	clearpools()
 
+	_g_ := getg()
+	_g_.m.traceback = 2
+	gp := _g_.m.curg
+	casgstatus(gp, _Grunning, _Gwaiting)
+	gp.waitreason = "garbage collection"
+
 	// Run gc on the g0 stack. We do this so that the g stack
 	// we're currently running on will no longer change. Cuts
 	// the root set down a bit (g0 stacks are not scanned, and
 	// we don't need to scan gc's internal state). We also
 	// need to switch to g0 so we can shrink the stack.
 	systemstack(func() {
-		gc_m(startTime, mode == gcForceBlockMode)
-	})
-
-	systemstack(func() {
-		// Called from malloc.go using systemstack.
-		// The world is stopped. Rerun the scan and mark phases
-		// using the bitMarkedCheck bit instead of the
-		// bitMarked bit. If the marking encounters an
-		// bitMarked bit that is not set then we throw.
-		//go:nowritebarrier
-		if debug.gccheckmark == 0 {
-			return
-		}
-
-		if checkmarkphase {
-			throw("gccheckmark_m, entered with checkmarkphase already true")
-		}
-
-		checkmarkphase = true
-		initCheckmarks()
-		gc_m(startTime, mode == gcForceBlockMode) // turns off checkmarkphase + calls clearcheckmarkbits
-	})
+		gcMark(startTime)
+		if debug.gccheckmark > 0 {
+			// Run a full stop-the-world mark using checkmark bits,
+			// to check that we didn't forget to mark anything during
+			// the concurrent mark process.
+			initCheckmarks()
+			gcMark(startTime)
+			clearCheckmarks()
+		}
+		gcSweep(mode)
 
-	if debug.gctrace > 1 {
-		startTime = nanotime()
-		systemstack(func() {
-			gc_m(startTime, mode == gcForceBlockMode)
-		})
-	}
+		if debug.gctrace > 1 {
+			startTime = nanotime()
+			finishsweep_m()
+			gcMark(startTime)
+			gcSweep(mode)
+		}
+	})
+
+	_g_.m.traceback = 0
+	casgstatus(gp, _Gwaiting, _Grunning)
 
 	if trace.enabled {
 		traceGCDone()
@@ -427,56 +424,24 @@ func gc(mode int) {
 	}
 }
 
+// gcMark runs the mark (or, for concurrent GC, mark termination)
 // STW is in effect at this point.
 //TODO go:nowritebarrier
-func gc_m(start_time int64, eagersweep bool) {
-	if _DebugGCPtrs {
-		print("GC start\n")
-	}
-
-	_g_ := getg()
-	gp := _g_.m.curg
-	casgstatus(gp, _Grunning, _Gwaiting)
-	gp.waitreason = "garbage collection"
-
-	gcphase = _GCmarktermination
+func gcMark(start_time int64) {
 	if debug.allocfreetrace > 0 {
 		tracegc()
 	}
 
-	_g_.m.traceback = 2
 	t0 := start_time
 	work.tstart = start_time
+	gcphase = _GCmarktermination
 
 	var t1 int64
 	if debug.gctrace > 0 {
 		t1 = nanotime()
 	}
 
-	if !checkmarkphase {
-		// TODO(austin) This is a noop beceause we should
-		// already have swept everything to the current
-		// sweepgen.
-		finishsweep_m() // skip during checkmark debug phase.
-	}
-
-	// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
-	// resizing/freeing allspans.
-	// New spans can be created while GC progresses, but they are not garbage for
-	// this round:
-	// - new stack spans can be created even while the world is stopped.
-	// - new malloc spans can be created during the concurrent sweep
-	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
-	lock(&mheap_.lock)
-	// Free the old cached sweep array if necessary.
-	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
-		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
-	}
-	// Cache the current array for marking.
-	mheap_.gcspans = mheap_.allspans
-	work.spans = h_allspans
-	unlock(&mheap_.lock)
+	gcCopySpans()
 
 	work.nwait = 0
 	work.ndone = 0
@@ -584,35 +549,31 @@ func gc_m(start_time int64, eagersweep bool) {
 		sweep.nbgsweep = 0
 		sweep.npausesweep = 0
 	}
+}
 
-	if debug.gccheckmark > 0 {
-		if !checkmarkphase {
-			// first half of two-pass; don't set up sweep
-			casgstatus(gp, _Gwaiting, _Grunning)
-			return
-		}
-		checkmarkphase = false // done checking marks
-		clearCheckmarks()
-	}
-
-	// See the comment in the beginning of this function as to why we need the following.
-	// Even if this is still stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
+func gcSweep(mode int) {
+	gcCopySpans()
+
 	lock(&mheap_.lock)
-	// Free the old cached mark array if necessary.
-	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
-		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
-	}
-	// Cache the current array for sweeping.
-	mheap_.gcspans = mheap_.allspans
 	mheap_.sweepgen += 2
 	mheap_.sweepdone = 0
-	work.spans = h_allspans
 	sweep.spanidx = 0
 	unlock(&mheap_.lock)
 
-	if _ConcurrentSweep && !eagersweep {
-		lock(&gclock)
+	if !_ConcurrentSweep || mode == gcForceBlockMode {
+		// Special case synchronous sweep.
+		// Sweep all spans eagerly.
+		for sweepone() != ^uintptr(0) {
+			sweep.npausesweep++
+		}
+		// Do an additional mProf_GC, because all 'free' events are now real as well.
+		mProf_GC()
+		mProf_GC()
+		return
+	}
+
+	// Background sweep.
+	lock(&sweep.lock)
 	if !sweep.started {
 		go bgsweep()
 		sweep.started = true
@@ -620,24 +581,27 @@ func gc_m(start_time int64, eagersweep bool) {
 		sweep.parked = false
 		ready(sweep.g)
 	}
-		unlock(&gclock)
-	} else {
-		// Sweep all spans eagerly.
-		for sweepone() != ^uintptr(0) {
-			sweep.npausesweep++
-		}
-		// Do an additional mProf_GC, because all 'free' events are now real as well.
-		mProf_GC()
-	}
-
+	unlock(&sweep.lock)
 	mProf_GC()
-	_g_.m.traceback = 0
+}
 
-	if _DebugGCPtrs {
-		print("GC end\n")
-	}
-
-	casgstatus(gp, _Gwaiting, _Grunning)
+func gcCopySpans() {
+	// Cache runtime.mheap_.allspans in work.spans to avoid conflicts with
+	// resizing/freeing allspans.
+	// New spans can be created while GC progresses, but they are not garbage for
+	// this round:
+	// - new stack spans can be created even while the world is stopped.
+	// - new malloc spans can be created during the concurrent sweep
+	// Even if this is stop-the-world, a concurrent exitsyscall can allocate a stack from heap.
+	lock(&mheap_.lock)
+	// Free the old cached mark array if necessary.
+	if work.spans != nil && &work.spans[0] != &h_allspans[0] {
+		sysFree(unsafe.Pointer(&work.spans[0]), uintptr(len(work.spans))*unsafe.Sizeof(work.spans[0]), &memstats.other_sys)
+	}
+	// Cache the current array for sweeping.
+	mheap_.gcspans = mheap_.allspans
+	work.spans = h_allspans
+	unlock(&mheap_.lock)
 }
 
 // Hooks for other packages

View File

@@ -86,7 +86,7 @@ func markroot(desc *parfor, i uint32) {
 			if s.state != mSpanInUse {
 				continue
 			}
-			if !checkmarkphase && s.sweepgen != sg {
+			if !useCheckmark && s.sweepgen != sg {
 				// sweepgen was updated (+2) during non-checkmark GC pass
 				print("sweep ", s.sweepgen, " ", sg, "\n")
 				throw("gc: unswept span")
@@ -458,7 +458,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, gcw *gcWorkProducer) {
 		}
 		if bits&typePointer != typePointer {
-			print("gc checkmarkphase=", checkmarkphase, " b=", hex(b), " ptrmask=", ptrmask, "\n")
+			print("gc useCheckmark=", useCheckmark, " b=", hex(b), " ptrmask=", ptrmask, "\n")
 			throw("unexpected garbage collection bits")
 		}
@@ -470,7 +470,7 @@ func scanobject(b, n uintptr, ptrmask *uint8, gcw *gcWorkProducer) {
 			continue
 		}
-		if mheap_.shadow_enabled && debug.wbshadow >= 2 && debug.gccheckmark > 0 && checkmarkphase {
+		if mheap_.shadow_enabled && debug.wbshadow >= 2 && debug.gccheckmark > 0 && useCheckmark {
 			checkwbshadow((*uintptr)(unsafe.Pointer(b + i)))
 		}
@@ -528,7 +528,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, gcw *gcWorkProducer) {
 		throw("greyobject: obj not pointer-aligned")
 	}
-	if checkmarkphase {
+	if useCheckmark {
 		if !hbits.isMarked() {
 			print("runtime:greyobject: checkmarks finds unexpected unmarked object obj=", hex(obj), "\n")
 			print("runtime: found obj at *(", hex(base), "+", hex(off), ")\n")
@@ -591,7 +591,7 @@ func greyobject(obj, base, off uintptr, hbits heapBits, gcw *gcWorkProducer) {
 		hbits.setMarked()
 	}
-	if !checkmarkphase && hbits.typeBits() == typeDead {
+	if !useCheckmark && hbits.typeBits() == typeDead {
 		return // noscan object
 	}
@@ -611,7 +611,7 @@ func gcmarknewobject_m(obj uintptr) {
 	if gcphase != _GCmarktermination {
 		throw("marking new object while not in mark termination phase")
 	}
-	if checkmarkphase { // The world should be stopped so this should not happen.
+	if useCheckmark { // The world should be stopped so this should not happen.
 		throw("gcmarknewobject called while doing checkmark")
 	}
@@ -636,13 +636,14 @@ func gcmarknewobject_m(obj uintptr) {
 // there are no more pointers in the object. This information is held
 // in the second nibble.
 
-// When marking an object if the bool checkmarkphase is true one uses the above
-// encoding, otherwise one uses the bitMarked bit in the lower two bits
-// of the nibble.
-var checkmarkphase = false
+// If useCheckmark is true, marking of an object uses the
+// checkmark bits (encoding above) instead of the standard
+// mark bits.
+var useCheckmark = false
 
 //go:nowritebarrier
 func initCheckmarks() {
+	useCheckmark = true
 	for _, s := range work.spans {
 		if s.state == _MSpanInUse {
 			heapBitsForSpan(s.base()).initCheckmarkSpan(s.layout())
@@ -651,6 +652,7 @@ func initCheckmarks() {
 }
 
 func clearCheckmarks() {
+	useCheckmark = false
 	for _, s := range work.spans {
 		if s.state == _MSpanInUse {
 			heapBitsForSpan(s.base()).clearCheckmarkSpan(s.layout())

View File

@@ -11,8 +11,8 @@ import "unsafe"
 var sweep sweepdata
 
 // State of background sweep.
-// Protected by gclock.
 type sweepdata struct {
+	lock mutex
 	g *g
 	parked bool
 	started bool
@@ -23,8 +23,6 @@ type sweepdata struct {
 	npausesweep uint32
 }
 
-var gclock mutex
-
 //go:nowritebarrier
 func finishsweep_m() {
 	// The world is stopped so we should be able to complete the sweeps
@@ -51,16 +49,16 @@ func bgsweep() {
 			sweep.nbgsweep++
 			Gosched()
 		}
-		lock(&gclock)
+		lock(&sweep.lock)
 		if !gosweepdone() {
 			// This can happen if a GC runs between
 			// gosweepone returning ^0 above
 			// and the lock being acquired.
-			unlock(&gclock)
+			unlock(&sweep.lock)
 			continue
 		}
 		sweep.parked = true
-		goparkunlock(&gclock, "GC sweep wait", traceEvGoBlock)
+		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock)
 	}
 }
@@ -145,10 +143,6 @@ func mSpan_EnsureSwept(s *mspan) {
 // caller takes care of it.
 //TODO go:nowritebarrier
 func mSpan_Sweep(s *mspan, preserve bool) bool {
-	if checkmarkphase {
-		throw("MSpan_Sweep: checkmark only runs in STW and after the sweep")
-	}
-
 	// It's critical that we enter this function with preemption disabled,
 	// GC must not start while we are in the middle of this function.
 	_g_ := getg()