
runtime: move roots' bases calculation to gcMarkRootPrepare

This patch addresses Austin's TODO in markroot: it moves the
calculation of the base indexes of each root type from the markroot
function into gcMarkRootPrepare, so the bases are computed once per
GC cycle instead of on every markroot call.

Change-Id: Ib231de34e7f81e922762fc3ee2b1830921c0c7cf
Reviewed-on: https://go-review.googlesource.com/c/go/+/279461
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Austin Clements <austin@google.com>
Ruslan Andreev 2020-12-22 19:22:14 +08:00 committed by Austin Clements
parent ab02cbd29f
commit 82e4a6310b
2 changed files with 22 additions and 19 deletions
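For context, the pattern the change adopts is to compute the partition bases of a flat job-index space once, up front, and then dispatch each job index against the stored bases. Below is a minimal, self-contained Go sketch of that idea; the type, function names, and counts are illustrative placeholders, not the runtime's actual code.

	// Sketch: split a flat index space into per-root-type ranges by
	// precomputing the range bases once (as gcMarkRootPrepare now does)
	// instead of recomputing them for every dispatched index.
	package main

	import "fmt"

	// bases is a stand-in for the new base* fields on the work struct.
	type bases struct {
		flushCache, data, bss, spans, stacks, end uint32
	}

	// prepare mirrors the arithmetic added to gcMarkRootPrepare, using
	// made-up counts rather than the runtime's real root counts.
	func prepare(fixed, nFlush, nData, nBSS, nSpan, nStack uint32) bases {
		var b bases
		b.flushCache = fixed
		b.data = b.flushCache + nFlush
		b.bss = b.data + nData
		b.spans = b.bss + nBSS
		b.stacks = b.spans + nSpan
		b.end = b.stacks + nStack
		return b
	}

	// kind plays the role of markroot's switch: classify a job index by
	// comparing it against the precomputed bases.
	func kind(b bases, i uint32) string {
		switch {
		case b.flushCache <= i && i < b.data:
			return "flush mcache"
		case b.data <= i && i < b.bss:
			return "data root"
		case b.bss <= i && i < b.spans:
			return "BSS root"
		case b.spans <= i && i < b.stacks:
			return "span root"
		case b.stacks <= i && i < b.end:
			return "stack root"
		default:
			return "fixed root"
		}
	}

	func main() {
		b := prepare(2, 1, 3, 3, 4, 8)
		for i := uint32(0); i < b.end; i++ {
			fmt.Printf("job %2d -> %s\n", i, kind(b, i))
		}
	}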

src/runtime/mgc.go

@@ -328,6 +328,10 @@ var work struct {
 	nFlushCacheRoots                               int
 	nDataRoots, nBSSRoots, nSpanRoots, nStackRoots int
 
+	// Base indexes of each root type. Set by gcMarkRootPrepare.
+	baseFlushCache                                    uint32
+	baseData, baseBSS, baseSpans, baseStacks, baseEnd uint32
+
 	// Each type of GC state transition is protected by a lock.
 	// Since multiple threads can simultaneously detect the state
 	// transition condition, any thread that detects a transition

src/runtime/mgcmark.go

@@ -106,6 +106,14 @@ func gcMarkRootPrepare() {
 	work.markrootNext = 0
 	work.markrootJobs = uint32(fixedRootCount + work.nFlushCacheRoots + work.nDataRoots + work.nBSSRoots + work.nSpanRoots + work.nStackRoots)
+
+	// Calculate base indexes of each root type
+	work.baseFlushCache = uint32(fixedRootCount)
+	work.baseData = work.baseFlushCache + uint32(work.nFlushCacheRoots)
+	work.baseBSS = work.baseData + uint32(work.nDataRoots)
+	work.baseSpans = work.baseBSS + uint32(work.nBSSRoots)
+	work.baseStacks = work.baseSpans + uint32(work.nSpanRoots)
+	work.baseEnd = work.baseStacks + uint32(work.nStackRoots)
 }
 
 // gcMarkRootCheck checks that all roots have been scanned. It is
@@ -149,28 +157,19 @@ var oneptrmask = [...]uint8{1}
 //
 //go:nowritebarrier
 func markroot(gcw *gcWork, i uint32) {
-	// TODO(austin): This is a bit ridiculous. Compute and store
-	// the bases in gcMarkRootPrepare instead of the counts.
-	baseFlushCache := uint32(fixedRootCount)
-	baseData := baseFlushCache + uint32(work.nFlushCacheRoots)
-	baseBSS := baseData + uint32(work.nDataRoots)
-	baseSpans := baseBSS + uint32(work.nBSSRoots)
-	baseStacks := baseSpans + uint32(work.nSpanRoots)
-	end := baseStacks + uint32(work.nStackRoots)
-
 	// Note: if you add a case here, please also update heapdump.go:dumproots.
 	switch {
-	case baseFlushCache <= i && i < baseData:
-		flushmcache(int(i - baseFlushCache))
+	case work.baseFlushCache <= i && i < work.baseData:
+		flushmcache(int(i - work.baseFlushCache))
 
-	case baseData <= i && i < baseBSS:
+	case work.baseData <= i && i < work.baseBSS:
 		for _, datap := range activeModules() {
-			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-baseData))
+			markrootBlock(datap.data, datap.edata-datap.data, datap.gcdatamask.bytedata, gcw, int(i-work.baseData))
 		}
 
-	case baseBSS <= i && i < baseSpans:
+	case work.baseBSS <= i && i < work.baseSpans:
 		for _, datap := range activeModules() {
-			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-baseBSS))
+			markrootBlock(datap.bss, datap.ebss-datap.bss, datap.gcbssmask.bytedata, gcw, int(i-work.baseBSS))
 		}
 
 	case i == fixedRootFinalizers:
@@ -184,18 +183,18 @@ func markroot(gcw *gcWork, i uint32) {
 		// stackfree.
 		systemstack(markrootFreeGStacks)
 
-	case baseSpans <= i && i < baseStacks:
+	case work.baseSpans <= i && i < work.baseStacks:
 		// mark mspan.specials
-		markrootSpans(gcw, int(i-baseSpans))
+		markrootSpans(gcw, int(i-work.baseSpans))
 
 	default:
 		// the rest is scanning goroutine stacks
 		var gp *g
-		if baseStacks <= i && i < end {
+		if work.baseStacks <= i && i < work.baseEnd {
 			// N.B. Atomic read of allglen in gcMarkRootPrepare
 			// acts as a barrier to ensure that allgs must be large
 			// enough to contain all relevant Gs.
-			gp = allgs[i-baseStacks]
+			gp = allgs[i-work.baseStacks]
 		} else {
 			throw("markroot: bad index")
 		}
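
As a closing note on how these job indexes get consumed: during marking, each GC worker repeatedly claims the next index below work.markrootJobs (starting from work.markrootNext, both set in gcMarkRootPrepare above) and hands it to markroot. The snippet below is a self-contained sketch of that claim-and-dispatch pattern using sync/atomic; it is an illustration only, not the runtime's real drain loop, and it omits preemption and buffer handling entirely.

	// Sketch: several workers atomically claim job indexes from a shared
	// counter until all jobs are taken, mirroring how root-marking work
	// is handed out by index. Counts and output are illustrative only.
	package main

	import (
		"fmt"
		"sync"
		"sync/atomic"
	)

	func main() {
		const jobs = uint32(12) // stand-in for work.markrootJobs
		var next atomic.Uint32  // stand-in for work.markrootNext

		var wg sync.WaitGroup
		for w := 0; w < 4; w++ {
			wg.Add(1)
			go func(worker int) {
				defer wg.Done()
				for {
					job := next.Add(1) - 1 // claim the next unprocessed index
					if job >= jobs {
						return // every root job has been claimed
					}
					fmt.Printf("worker %d handles root job %d\n", worker, job)
				}
			}(w)
		}
		wg.Wait()
	}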