
runtime: make all GC mark workers yield for forEachP

Currently dedicated GC mark workers really try to avoid getting
preempted. The one exception is for a pending STW, indicated by
sched.gcwaiting. This is currently fine because other kinds of
preemptions don't matter to the mark workers: they're intentionally
bound to their P.
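
Concretely, the only early-out in the drain loops today is the check removed in the diff below. Here is a minimal sketch of that condition using stand-in names (gcWaiting for sched.gcwaiting, preempt/preemptible for gp.preempt and the gcDrainUntilPreempt flag), since the real scheduler state is unexported:

package sketch

import "sync/atomic"

// Minimal model of the pre-CL yield check in gcDrain's drain loops.
// Dedicated workers drain with preemptible == false, so only a pending
// STW (gcWaiting, standing in for sched.gcwaiting) can stop them early.
var gcWaiting atomic.Bool

func shouldYield(preempt, preemptible bool) bool {
	return preempt && (preemptible || gcWaiting.Load())
}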

With the new execution tracer we're going to want to use forEachP to get
the attention of all Ps. We may want to do this during a GC cycle.
forEachP doesn't set sched.gcwaiting, so it may end up waiting the full
GC mark phase, burning a thread and a P in the meantime. This can mean
basically seconds of waiting and trying to preempt GC mark workers.
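
For context, forEachP's handshake works by flagging every P and waiting for each one to run the supplied function at a safe point. The sketch below is a simplified, stand-alone model of that handshake (proc, safePointRequested, safePointFn, and the WaitGroup are stand-ins, not the runtime's real p.runSafePointFn/sched.safePointFn machinery); it only illustrates why a P that never yields keeps the caller blocked:

package sketch

import (
	"sync"
	"sync/atomic"
)

// proc stands in for the runtime's P; safePointRequested models
// p.runSafePointFn, safePointFn models sched.safePointFn, and done models
// the bookkeeping forEachP uses to wait for stragglers.
type proc struct {
	safePointRequested atomic.Bool
}

var (
	allProcs    []*proc
	safePointFn func(*proc)
	done        sync.WaitGroup
)

// forEachProc models runtime.forEachP: ask every P to run fn at its next
// safe point, then wait until they all have. A P whose current goroutine
// never yields (e.g. a dedicated mark worker that only checks for a
// pending STW) keeps this wait alive for as long as it keeps running.
func forEachProc(fn func(*proc)) {
	safePointFn = fn
	done.Add(len(allProcs))
	for _, pp := range allProcs {
		pp.safePointRequested.Store(true)
	}
	done.Wait()
}

// ackSafePoint is the P-side half of the handshake: on reaching a safe
// point, a P that sees the request runs the function and releases one
// waiter.
func ackSafePoint(pp *proc) {
	if pp.safePointRequested.CompareAndSwap(true, false) {
		safePointFn(pp)
		done.Done()
	}
}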

This change makes all mark workers yield if (*p).runSafePointFn != 0 so
that the workers actually yield somewhat promptly in response to a
forEachP attempt.
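
In terms of those stand-ins, the fix just adds the per-P safe-point request to the yield condition, mirroring the pp.runSafePointFn != 0 test in the hunks below:

package sketch

import "sync/atomic"

// Post-CL yield condition, using the stand-ins from the earlier sketches:
// gcWaiting models sched.gcwaiting and pp.safePointRequested models
// (*p).runSafePointFn for the worker's own P.
var gcWaiting atomic.Bool

type proc struct {
	safePointRequested atomic.Bool
}

// shouldYield now returns true if we're generally preemptible, if a STW is
// pending, or if a forEachP caller is waiting on this P.
func shouldYield(pp *proc, preempt, preemptible bool) bool {
	return preempt && (preemptible || gcWaiting.Load() || pp.safePointRequested.Load())
}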

Change-Id: I7430baf326886b9f7a868704482a224dae7c9bba
Reviewed-on: https://go-review.googlesource.com/c/go/+/537235
Reviewed-by: Michael Pratt <mpratt@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Reviewed-by: Mauri de Souza Meneguzzo <mauri870@gmail.com>
Auto-Submit: Michael Knyszek <mknyszek@google.com>
Author:    Michael Anthony Knyszek
Date:      2023-10-23 19:30:35 +00:00
Committer: Gopher Robot
Parent:    ff7cf2d4cd
Commit:    25895d1c99

--- a/src/runtime/mgcmark.go
+++ b/src/runtime/mgcmark.go

@@ -1068,7 +1068,7 @@ func gcDrainMarkWorkerFractional(gcw *gcWork) {
 // credit to gcController.bgScanCredit every gcCreditSlack units of
 // scan work.
 //
-// gcDrain will always return if there is a pending STW.
+// gcDrain will always return if there is a pending STW or forEachP.
 //
 // Disabling write barriers is necessary to ensure that after we've
 // confirmed that we've drained gcw, that we don't accidentally end
@@ -1084,7 +1084,10 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 		throw("gcDrain phase incorrect")
 	}
 
+	// N.B. We must be running in a non-preemptible context, so it's
+	// safe to hold a reference to our P here.
 	gp := getg().m.curg
+	pp := gp.m.p.ptr()
 	preemptible := flags&gcDrainUntilPreempt != 0
 	flushBgCredit := flags&gcDrainFlushBgCredit != 0
 	idle := flags&gcDrainIdle != 0
@@ -1106,8 +1109,9 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 
 	// Drain root marking jobs.
 	if work.markrootNext < work.markrootJobs {
-		// Stop if we're preemptible or if someone wants to STW.
-		for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
+		// Stop if we're preemptible, if someone wants to STW, or if
+		// someone is calling forEachP.
+		for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
 			job := atomic.Xadd(&work.markrootNext, +1) - 1
 			if job >= work.markrootJobs {
 				break
@@ -1120,8 +1124,16 @@ func gcDrain(gcw *gcWork, flags gcDrainFlags) {
 	}
 
 	// Drain heap marking jobs.
-	// Stop if we're preemptible or if someone wants to STW.
-	for !(gp.preempt && (preemptible || sched.gcwaiting.Load())) {
+	//
+	// Stop if we're preemptible, if someone wants to STW, or if
+	// someone is calling forEachP.
+	//
+	// TODO(mknyszek): Consider always checking gp.preempt instead
+	// of having the preempt flag, and making an exception for certain
+	// mark workers in retake. That might be simpler than trying to
+	// enumerate all the reasons why we might want to preempt, even
+	// if we're supposed to be mostly non-preemptible.
+	for !(gp.preempt && (preemptible || sched.gcwaiting.Load() || pp.runSafePointFn != 0)) {
 		// Try to keep work available on the global queue. We used to
 		// check if there were waiting workers, but it's better to
 		// just keep work available than to make workers wait. In the