runtime: don't rescan finalizers queue during mark termination
Currently we scan the finalizers queue both during concurrent mark and
during mark termination. This costs roughly 20ns per queued finalizer
and about 1ns per unused finalizer queue slot (the allocated queue
length never decreases), which can drive up STW time if there are many
finalizers. However, we only add finalizers to this queue during
sweeping, which means the second scan will never find anything new.
Hence, we can fix this by simply not scanning the finalizers queue
during mark termination. This brings the STW time under the 100µs goal
even with 1,000,000 queued finalizers.

Fixes #18869.

Change-Id: I4ce5620c66fb7f13ebeb39ca313ce57047d1d0fb
Reviewed-on: https://go-review.googlesource.com/36013
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Rick Hudson <rlh@golang.org>
parent 98da2d1f91
commit f1ba75f8c5
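To make the commit message's numbers concrete, below is a small illustrative
sketch (my own construction, not the benchmark behind the quoted figures). The
runtime runs all finalizers sequentially on a single goroutine, so blocking the
first finalizer strands the remaining entries on the queue that the
fixedRootFinalizers root scan walks. On a pre-fix runtime, that scan ran a
second time inside the stop-the-world mark termination; after this change it
runs only during concurrent mark. How quickly the backlog actually accumulates
depends on runtime internals (finalizers are queued as their dead objects are
swept), so treat the GC calls and sleep as approximate.

package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	const n = 1000000
	block := make(chan struct{})

	// Register a finalizer on many short-lived objects. A finalizer is
	// moved onto the runtime's finalizer queue only once its object is
	// found dead during sweep.
	for i := 0; i < n; i++ {
		p := new(int64)
		runtime.SetFinalizer(p, func(*int64) {
			// Finalizers run one at a time on a single goroutine,
			// so blocking the first one strands the rest in the queue.
			<-block
		})
	}

	runtime.GC() // the objects become unreachable garbage
	runtime.GC() // sweeping moves their finalizers onto the queue
	time.Sleep(100 * time.Millisecond)

	// On a pre-fix runtime, this cycle's mark termination rescans the
	// entire queue while the world is stopped.
	runtime.GC()

	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)
	// PauseNs is a circular buffer; the most recent STW pause is at
	// PauseNs[(NumGC+255)%256].
	fmt.Printf("most recent GC pause: %d ns\n", ms.PauseNs[(ms.NumGC+255)%256])

	close(block) // let the queued finalizers drain
}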
--- a/src/runtime/mfinal.go
+++ b/src/runtime/mfinal.go
@@ -12,8 +12,12 @@ import (
 	"unsafe"
 )
 
+// finblock is an array of finalizers to be executed. finblocks are
+// arranged in a linked list for the finalizer queue.
+//
 // finblock is allocated from non-GC'd memory, so any heap pointers
-// must be specially handled.
+// must be specially handled. GC currently assumes that the finalizer
+// queue does not grow during marking (but it can shrink).
 //
 //go:notinheap
 type finblock struct {
@@ -71,6 +75,16 @@ var finalizer1 = [...]byte{
 }
 
 func queuefinalizer(p unsafe.Pointer, fn *funcval, nret uintptr, fint *_type, ot *ptrtype) {
+	if gcphase != _GCoff {
+		// Currently we assume that the finalizer queue won't
+		// grow during marking so we don't have to rescan it
+		// during mark termination. If we ever need to lift
+		// this assumption, we can do it by adding the
+		// necessary barriers to queuefinalizer (which it may
+		// have automatically).
+		throw("queuefinalizer during GC")
+	}
+
 	lock(&finlock)
 	if finq == nil || finq.cnt == uint32(len(finq.fin)) {
 		if finc == nil {
--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go
@@ -201,6 +201,11 @@ func markroot(gcw *gcWork, i uint32) {
 		}
 
 	case i == fixedRootFinalizers:
+		// Only do this once per GC cycle since we don't call
+		// queuefinalizer during marking.
+		if work.markrootDone {
+			break
+		}
 		for fb := allfin; fb != nil; fb = fb.alllink {
 			cnt := uintptr(atomic.Load(&fb.cnt))
 			scanblock(uintptr(unsafe.Pointer(&fb.fin[0])), cnt*unsafe.Sizeof(fb.fin[0]), &finptrmask[0], gcw)