
runtime: fix lost sleep causing TestZeroTimer flakes

Classic operating system kernel mistake: if you start using
per-CPU data without disabling interrupts on the CPU,
and then an interrupt reschedules the process onto a different
CPU, now you're using the wrong CPU's per-CPU data.
The same thing happens in Go if you use per-M or per-P
data structures while not holding a lock nor using acquirem.
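
The same stale-choice window can be sketched at user level. The sharded
structure below is hypothetical (not runtime code): picking which shard is
"current" and only then locking it mirrors reading getg().m.p and calling
ts.lock without acquirem.

    package main

    import "sync"

    // shard stands in for one P's timer heap; pool.current stands in for
    // "the P this goroutine happens to be running on right now".
    type shard struct {
        mu    sync.Mutex
        items []int
    }

    type pool struct {
        mu      sync.Mutex
        current *shard
    }

    // buggyAdd mirrors the broken pattern: the choice of shard is made under
    // the pool lock, but nothing keeps it valid once that lock is dropped.
    // If another goroutine swaps pool.current in between (the analogue of a
    // goroutine being rescheduled onto a different P), the item lands on a
    // shard whose consumer may already have decided it is empty.
    func buggyAdd(p *pool, v int) {
        p.mu.Lock()
        s := p.current
        p.mu.Unlock() // the choice can go stale from here on
        s.mu.Lock()   // may now lock the "wrong" shard
        s.items = append(s.items, v)
        s.mu.Unlock()
    }

    func main() {
        p := &pool{current: &shard{}}
        buggyAdd(p, 1)
    }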

In the original timer.modify before CL 564977, I had been
very careful about this during the "unlock t; lock ts" dance,
only calling releasem after ts was locked. That made sure
we used the right ts. The refactoring of that code into its
own helper function in CL 564977 missed that nuance.

The code

    ts := &getg().m.p.ptr().timers
    ts.lock()

was now executing without holding any locks or using acquirem.
If the goroutine changed its M or P between deciding which
ts to use and actually locking that ts, the code would proceed
to add the timer t to some other P's timers. If the P was idle
by then, the scheduler could have already checked it for timers
and not notice the newly added timer when deciding when the
next timer should trigger.

The solution is to do what the old code correctly did, namely
acquirem before deciding which ts to use, rather than assume
getg().m.p won't change before ts.lock can complete.
This CL does that.
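
In the same user-level analogy (types repeated so the sketch stands alone;
again hypothetical, not the runtime code), the fix is to keep the choice
pinned for the whole insert, which is what acquirem/releasem do for the
current P.

    package main

    import "sync"

    type shard struct {
        mu    sync.Mutex
        items []int
    }

    type pool struct {
        mu      sync.Mutex
        current *shard
    }

    // pinnedAdd mirrors the fixed pattern: the "pin" (pool.mu here, acquirem
    // in the runtime) is held across both choosing the shard and inserting
    // into it, so the choice cannot go stale before the insert completes.
    func pinnedAdd(p *pool, v int) {
        p.mu.Lock() // analogue of mp := acquirem()
        s := p.current
        s.mu.Lock()
        s.items = append(s.items, v)
        s.mu.Unlock()
        p.mu.Unlock() // analogue of releasem(mp)
    }

    func main() {
        p := &pool{current: &shard{}}
        pinnedAdd(p, 1)
    }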

Before CL 564977,

	stress ./time.test -test.run='ZeroTimer/impl=(func|cache)' -test.timeout=3m -test.count=20

ran without failure for over an hour on my laptop.
Starting in CL 564977, it consistently failed within a few minutes.
After this CL, it now runs without failure for over an hour again.

Fixes #66006.

Change-Id: Ib9e7ccaa0f22a326ce3fdef2b9a92f7f0bdafcbf
Reviewed-on: https://go-review.googlesource.com/c/go/+/571196
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Russ Cox <rsc@golang.org>
Reviewed-by: Ian Lance Taylor <iant@google.com>
Russ Cox authored 2024-03-13 19:48:58 -04:00, committed by Gopher Robot
commit 2965dc9895, parent 9159d71a37
2 changed files with 19 additions and 1 deletion


@@ -526,7 +526,17 @@ func (t *timer) needsAdd() bool {
 // may result in concurrent calls to t.maybeAdd,
 // so we cannot assume that t is not in a heap on entry to t.maybeAdd.
 func (t *timer) maybeAdd() {
-	ts := &getg().m.p.ptr().timers
+	// Note: Not holding any locks on entry to t.maybeAdd,
+	// so the current g can be rescheduled to a different M and P
+	// at any time, including between the ts := assignment and the
+	// call to ts.lock. If a reschedule happened then, we would be
+	// adding t to some other P's timers, perhaps even a P that the scheduler
+	// has marked as idle with no timers, in which case the timer could
+	// go unnoticed until long after t.when.
+	// Calling acquirem instead of using getg().m makes sure that
+	// we end up locking and inserting into the current P's timers.
+	mp := acquirem()
+	ts := &mp.p.ptr().timers
 	ts.lock()
 	ts.cleanHead()
 	t.lock()
@@ -539,6 +549,7 @@ func (t *timer) maybeAdd() {
 	}
 	t.unlock()
 	ts.unlock()
+	releasem(mp)
 	if when > 0 {
 		wakeNetPoller(when)
 	}


@@ -656,6 +656,13 @@ func TestZeroTimer(t *testing.T) {
 	t.Run("impl=func", func(t *testing.T) {
 		testZeroTimer(t, newTimerFunc)
 	})
+	t.Run("impl=cache", func(t *testing.T) {
+		timer := newTimerFunc(Hour)
+		testZeroTimer(t, func(d Duration) *Timer {
+			timer.Reset(d)
+			return timer
+		})
+	})
 }
 
 func testZeroTimer(t *testing.T, newTimer func(Duration) *Timer) {
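
For context, a rough sketch of the kind of check the testZeroTimer helper
performs (assumed from its name and how it is driven above, not copied from
the real test): it arms a zero-duration timer via the supplied constructor
and fails if the tick is slow to arrive, which is exactly the delivery that
the lost-sleep bug delays.

    package timersketch // hypothetical package; the real helper lives in the time package's tests

    import (
        "testing"
        "time"
    )

    // testZeroTimerSketch repeatedly arms a 0-duration timer and complains
    // if a tick takes unreasonably long to be delivered. With the lost
    // sleep, the timer can sit on a P the scheduler already considers idle
    // with no timers, so the receive below stalls and the test flakes.
    func testZeroTimerSketch(t *testing.T, newTimer func(time.Duration) *time.Timer) {
        for i := 0; i < 1000; i++ {
            start := time.Now()
            tm := newTimer(0)
            <-tm.C
            if d := time.Since(start); d > 2*time.Second {
                t.Errorf("zero-duration timer took %v to fire", d)
            }
        }
    }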