diff --git a/src/runtime/proc1.go b/src/runtime/proc1.go
index a47df13691..1eef1b8dcd 100644
--- a/src/runtime/proc1.go
+++ b/src/runtime/proc1.go
@@ -3332,3 +3332,27 @@ func sync_atomic_runtime_procPin() int {
 func sync_atomic_runtime_procUnpin() {
 	procUnpin()
 }
+
+// Active spinning for sync.Mutex.
+//go:linkname sync_runtime_canSpin sync.runtime_canSpin
+//go:nosplit
+func sync_runtime_canSpin(i int) bool {
+	// sync.Mutex is cooperative, so we are conservative with spinning.
+	// Spin only a few times, and only if we are running on a multicore machine,
+	// GOMAXPROCS>1, there is at least one other running P, and the local runq is empty.
+	// As opposed to runtime mutex we don't do passive spinning here,
+	// because there can be work on the global runq or on other Ps.
+	if i >= active_spin || ncpu <= 1 || gomaxprocs <= int32(sched.npidle+sched.nmspinning)+1 {
+		return false
+	}
+	if p := getg().m.p; p.runqhead != p.runqtail {
+		return false
+	}
+	return true
+}
+
+//go:linkname sync_runtime_doSpin sync.runtime_doSpin
+//go:nosplit
+func sync_runtime_doSpin() {
+	procyield(active_spin_cnt)
+}
diff --git a/src/sync/mutex.go b/src/sync/mutex.go
index 73b3377022..3f280ad719 100644
--- a/src/sync/mutex.go
+++ b/src/sync/mutex.go
@@ -48,15 +48,31 @@ func (m *Mutex) Lock() {
 	}
 
 	awoke := false
+	iter := 0
 	for {
 		old := m.state
 		new := old | mutexLocked
 		if old&mutexLocked != 0 {
+			if runtime_canSpin(iter) {
+				// Active spinning makes sense.
+				// Try to set mutexWoken flag to inform Unlock
+				// to not wake other blocked goroutines.
+				if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
+					atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
+					awoke = true
+				}
+				runtime_doSpin()
+				iter++
+				continue
+			}
 			new = old + 1<<mutexWaiterShift
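
The hooks above implement a spin-then-park policy: a contending goroutine first busy-waits for a bounded number of iterations (runtime_canSpin / runtime_doSpin), and only after that registers itself as a waiter and blocks. Below is a minimal, self-contained Go sketch of that policy, for illustration only; it is not the patched implementation. The names (spinThenParkMutex, maxSpins, sema) are invented for the example, runtime.Gosched stands in for procyield, and a buffered channel stands in for the runtime semaphore used by the real Lock/Unlock.

// spin_then_park_sketch.go — illustrative only, not the runtime code.
package main

import (
	"fmt"
	"runtime"
	"sync/atomic"
)

const maxSpins = 4 // rough stand-in for the runtime's active_spin limit

type spinThenParkMutex struct {
	state int32         // 0 = unlocked, 1 = locked
	sema  chan struct{} // crude substitute for the runtime semaphore
}

func newSpinThenParkMutex() *spinThenParkMutex {
	return &spinThenParkMutex{sema: make(chan struct{}, 1)}
}

func (m *spinThenParkMutex) Lock() {
	spins := 0
	for {
		if atomic.CompareAndSwapInt32(&m.state, 0, 1) {
			return // acquired
		}
		// Like runtime_canSpin: spin only a bounded number of times and
		// only when another P could plausibly release the lock soon.
		if spins < maxSpins && runtime.GOMAXPROCS(0) > 1 {
			runtime.Gosched() // the real code busy-waits with procyield(active_spin_cnt)
			spins++
			continue
		}
		// Spinning did not help; block until an Unlock posts a wakeup,
		// analogous to parking on the mutex semaphore.
		<-m.sema
		spins = 0
	}
}

func (m *spinThenParkMutex) Unlock() {
	atomic.StoreInt32(&m.state, 0)
	// Wake at most one parked waiter; drop the wakeup if one is already pending.
	select {
	case m.sema <- struct{}{}:
	default:
	}
}

func main() {
	m := newSpinThenParkMutex()
	const workers, loops = 4, 1000
	done := make(chan struct{})
	counter := 0
	for i := 0; i < workers; i++ {
		go func() {
			for j := 0; j < loops; j++ {
				m.Lock()
				counter++
				m.Unlock()
			}
			done <- struct{}{}
		}()
	}
	for i := 0; i < workers; i++ {
		<-done
	}
	fmt.Println("counter:", counter) // expect 4000
}

The bounded-spin check mirrors the intent of runtime_canSpin: spinning only pays off when another P is actually running and may release the lock shortly. On a single-core machine, or when the local runq already holds runnable goroutines, spinning just burns CPU that the lock holder or other work could use, so the goroutine parks instead.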