
runtime: split marking of span roots into 128 subtasks

Marking of span roots can represent a significant fraction of the time
spent in mark termination. Simply traversing the span list takes about
1ms per GB of heap and, if there are a large number of finalizers (for
example, for network connections), it may take much longer.

Improve the situation by splitting the span scan into 128 subtasks
that can be executed in parallel and load balanced by the markroots
parallel for. This lets the GC balance this job across the Ps.

A better solution is to do this during concurrent mark, or to improve
it algorithmically, but this is a simple change with a lot of bang for
the buck.
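
For intuition, the sharding arithmetic in this change gives shard k the
half-open range [k*n/128, (k+1)*n/128) of work.spans, so the 128 shards
cover every span exactly once and differ in size by at most one span.
The following standalone sketch (not part of the commit; the span count
and the shardBounds helper are illustrative, and only the 128-way split
mirrors _RootSpansShards) demonstrates that property:

package main

import "fmt"

const rootSpansShards = 128 // mirrors _RootSpansShards from the change

// shardBounds returns the half-open range of span indices that one shard
// covers, using the same integer arithmetic as markrootSpans.
func shardBounds(nSpans, shard int) (start, end int) {
	start = shard * nSpans / rootSpansShards
	end = (shard + 1) * nSpans / rootSpansShards
	return start, end
}

func main() {
	nSpans := 100000 // illustrative number of spans in work.spans
	covered := 0
	for shard := 0; shard < rootSpansShards; shard++ {
		start, end := shardBounds(nSpans, shard)
		covered += end - start
	}
	fmt.Println(covered == nSpans) // true: every span falls in exactly one shard
}

Because each shard's boundaries are computed from the shard index alone,
every markroot call can find its slice of spans without coordination,
which is what lets the parallel for spread the work across the Ps.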

This was suggested by Rhys Hiltner.

Updates #11485.

Change-Id: I8b281adf0ba827064e154a1b6cc32d4d8031c03c
Reviewed-on: https://go-review.googlesource.com/13112
Reviewed-by: Keith Randall <khr@golang.org>
Austin Clements 2015-08-04 10:45:29 -04:00
parent 739f133837
commit 572f08a064
2 changed files with 45 additions and 32 deletions

View File

@@ -129,9 +129,10 @@ const (
 	_RootData        = 0
 	_RootBss         = 1
 	_RootFinalizers  = 2
-	_RootSpans       = 3
-	_RootFlushCaches = 4
-	_RootCount       = 5
+	_RootSpans0      = 3
+	_RootSpansShards = 128
+	_RootFlushCaches = _RootSpans0 + _RootSpansShards
+	_RootCount       = _RootFlushCaches + 1
 
 	// sweepMinHeapDistance is a lower bound on the heap distance
 	// (in bytes) reserved for concurrent sweeping between GC
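
Concretely, the new constants lay the root jobs out as: indices 0-2 for
the data, BSS, and finalizer roots, indices 3 through 130 for the 128
span shards, index 131 for the mcache flush, and indices from 132
(_RootCount) onward for goroutine stacks. The sketch below is a
hypothetical, standalone illustration of that dispatch (describeRootJob
is not a runtime function, and the constants are renamed without the
leading underscore so it compiles as an ordinary program):

package main

import "fmt"

// Constants copied from the change above (underscores dropped so this
// builds outside the runtime package).
const (
	rootData        = 0
	rootBss         = 1
	rootFinalizers  = 2
	rootSpans0      = 3
	rootSpansShards = 128
	rootFlushCaches = rootSpans0 + rootSpansShards // 131
	rootCount       = rootFlushCaches + 1          // 132
)

// describeRootJob is a hypothetical helper showing how a markroot job
// index maps onto a unit of root-marking work under the new layout.
func describeRootJob(i int) string {
	switch {
	case i == rootData:
		return "scan data segment"
	case i == rootBss:
		return "scan BSS segment"
	case i == rootFinalizers:
		return "scan finalizer queue"
	case rootSpans0 <= i && i < rootSpans0+rootSpansShards:
		return fmt.Sprintf("mark span specials, shard %d of %d", i-rootSpans0, rootSpansShards)
	case i == rootFlushCaches:
		return "flush mcaches"
	default:
		return fmt.Sprintf("scan stack of goroutine allgs[%d]", i-rootCount)
	}
}

func main() {
	for _, i := range []int{0, 3, 130, 131, 132} {
		fmt.Printf("job %3d: %s\n", i, describeRootJob(i))
	}
}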

View File

@@ -74,41 +74,18 @@ func markroot(desc *parfor, i uint32) {
 			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), uintptr(fb.cnt)*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], &gcw)
 		}
 
-	case _RootSpans:
-		// mark MSpan.specials
-		sg := mheap_.sweepgen
-		for spanidx := uint32(0); spanidx < uint32(len(work.spans)); spanidx++ {
-			s := work.spans[spanidx]
-			if s.state != mSpanInUse {
-				continue
-			}
-			if !useCheckmark && s.sweepgen != sg {
-				// sweepgen was updated (+2) during non-checkmark GC pass
-				print("sweep ", s.sweepgen, " ", sg, "\n")
-				throw("gc: unswept span")
-			}
-			for sp := s.specials; sp != nil; sp = sp.next {
-				if sp.kind != _KindSpecialFinalizer {
-					continue
-				}
-				// don't mark finalized object, but scan it so we
-				// retain everything it points to.
-				spf := (*specialfinalizer)(unsafe.Pointer(sp))
-				// A finalizer can be set for an inner byte of an object, find object beginning.
-				p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
-				if gcphase != _GCscan {
-					scanobject(p, &gcw) // scanned during mark termination
-				}
-				scanblock(uintptr(unsafe.Pointer(&spf.fn)), ptrSize, &oneptrmask[0], &gcw)
-			}
-		}
-
 	case _RootFlushCaches:
 		if gcphase != _GCscan { // Do not flush mcaches during GCscan phase.
 			flushallmcaches()
 		}
 
 	default:
+		if _RootSpans0 <= i && i < _RootSpans0+_RootSpansShards {
+			// mark MSpan.specials
+			markrootSpans(&gcw, int(i)-_RootSpans0)
+			break
+		}
+
 		// the rest is scanning goroutine stacks
 		if uintptr(i-_RootCount) >= allglen {
 			throw("markroot: bad index")
@@ -136,6 +113,41 @@ func markroot(desc *parfor, i uint32) {
 	gcw.dispose()
 }
 
+// markrootSpans marks roots for one shard (out of _RootSpansShards)
+// of work.spans.
+//
+//go:nowritebarrier
+func markrootSpans(gcw *gcWork, shard int) {
+	sg := mheap_.sweepgen
+	startSpan := shard * len(work.spans) / _RootSpansShards
+	endSpan := (shard + 1) * len(work.spans) / _RootSpansShards
+	for spanidx := startSpan; spanidx < endSpan; spanidx++ {
+		s := work.spans[spanidx]
+		if s.state != mSpanInUse {
+			continue
+		}
+		if !useCheckmark && s.sweepgen != sg {
+			// sweepgen was updated (+2) during non-checkmark GC pass
+			print("sweep ", s.sweepgen, " ", sg, "\n")
+			throw("gc: unswept span")
+		}
+		for sp := s.specials; sp != nil; sp = sp.next {
+			if sp.kind != _KindSpecialFinalizer {
+				continue
+			}
+			// don't mark finalized object, but scan it so we
+			// retain everything it points to.
+			spf := (*specialfinalizer)(unsafe.Pointer(sp))
+			// A finalizer can be set for an inner byte of an object, find object beginning.
+			p := uintptr(s.start<<_PageShift) + uintptr(spf.special.offset)/s.elemsize*s.elemsize
+			if gcphase != _GCscan {
+				scanobject(p, gcw) // scanned during mark termination
+			}
+			scanblock(uintptr(unsafe.Pointer(&spf.fn)), ptrSize, &oneptrmask[0], gcw)
+		}
+	}
+}
+
 // gcAssistAlloc records and allocation of size bytes and, if
 // allowAssist is true, may assist GC scanning in proportion to the
 // allocations performed by this mutator since the last assist.