diff --git a/src/runtime/align_runtime_test.go b/src/runtime/align_runtime_test.go
index c3b9c1712c..e7af4cd6ff 100644
--- a/src/runtime/align_runtime_test.go
+++ b/src/runtime/align_runtime_test.go
@@ -17,7 +17,6 @@ var AtomicFields = []uintptr{
 	unsafe.Offsetof(p{}.timer0When),
 	unsafe.Offsetof(p{}.timerModifiedEarliest),
 	unsafe.Offsetof(p{}.gcFractionalMarkTime),
-	unsafe.Offsetof(schedt{}.pollUntil),
 	unsafe.Offsetof(schedt{}.timeToRun),
 	unsafe.Offsetof(timeHistogram{}.underflow),
 	unsafe.Offsetof(profBuf{}.overflow),
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index fd9c1daf43..07c7b1b7c1 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -2804,7 +2804,7 @@ top:
 
 	// Poll network until next timer.
 	if netpollinited() && (atomic.Load(&netpollWaiters) > 0 || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 {
-		atomic.Store64(&sched.pollUntil, uint64(pollUntil))
+		sched.pollUntil.Store(pollUntil)
 		if mp.p != 0 {
 			throw("findrunnable: netpoll with p")
 		}
@@ -2825,7 +2825,7 @@ top:
 			delay = 0
 		}
 		list := netpoll(delay) // block until new work is available
-		atomic.Store64(&sched.pollUntil, 0)
+		sched.pollUntil.Store(0)
 		sched.lastpoll.Store(now)
 		if faketime != 0 && list.empty() {
 			// Using fake time and nothing is ready; stop M.
@@ -2856,7 +2856,7 @@ top:
 			goto top
 		}
 	} else if pollUntil != 0 && netpollinited() {
-		pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
+		pollerPollUntil := sched.pollUntil.Load()
 		if pollerPollUntil == 0 || pollerPollUntil > pollUntil {
 			netpollBreak()
 		}
@@ -3071,7 +3071,7 @@ func wakeNetPoller(when int64) {
 	// field is either zero or the time to which the current
 	// poll is expected to run. This can have a spurious wakeup
 	// but should never miss a wakeup.
-	pollerPollUntil := int64(atomic.Load64(&sched.pollUntil))
+	pollerPollUntil := sched.pollUntil.Load()
 	if pollerPollUntil == 0 || pollerPollUntil > when {
 		netpollBreak()
 	}
diff --git a/src/runtime/runtime2.go b/src/runtime/runtime2.go
index 1d678883f0..17d47c0726 100644
--- a/src/runtime/runtime2.go
+++ b/src/runtime/runtime2.go
@@ -758,10 +758,9 @@ type p struct {
 }
 
 type schedt struct {
-	// accessed atomically. keep at top to ensure alignment on 32-bit systems.
 	goidgen   atomic.Uint64
 	lastpoll  atomic.Int64 // time of last network poll, 0 if currently polling
-	pollUntil uint64       // time to which current poll is sleeping
+	pollUntil atomic.Int64 // time to which current poll is sleeping
 
 	lock mutex
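
Note (not part of the patch): the change replaces manual atomic.Load64/Store64 calls on a plain uint64 with the typed atomic.Int64, whose Load/Store methods enforce atomic access and whose type carries the 64-bit alignment guarantee; that is presumably why the field can also be dropped from the AtomicFields offset check. A minimal sketch of the same before/after pattern, using the standard library's sync/atomic.Int64, which mirrors the runtime-internal type; the struct and field names below are illustrative only, not runtime code.

package main

import (
	"fmt"
	"sync/atomic"
)

// Before: a plain word that every reader and writer must remember to access
// with atomic.Load64/Store64, and that must be kept 8-byte aligned by hand on
// 32-bit platforms (hence the AtomicFields offset check removed in the patch).
type schedBefore struct {
	pollUntil uint64 // time to which current poll is sleeping
}

// After: a typed atomic; the methods enforce atomic access, and sync/atomic's
// Int64 is automatically 8-byte aligned even on 32-bit systems.
type schedAfter struct {
	pollUntil atomic.Int64 // time to which current poll is sleeping
}

func main() {
	var sched schedAfter
	sched.pollUntil.Store(1234) // replaces atomic.Store64(&sched.pollUntil, uint64(pollUntil))
	fmt.Println(sched.pollUntil.Load())
}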