mirror of
https://github.com/golang/go
synced 2024-10-03 06:21:21 -06:00
edcad8639a
Currently sync.Mutex is fully cooperative. That is, once contention is discovered, the goroutine calls into the scheduler. This is suboptimal as the resource can become free soon after (especially if critical sections are short). Server software usually runs at ~50% CPU utilization, that is, switching to other goroutines is not necessarily profitable. This change adds limited active spinning to sync.Mutex if: 1. running on a multicore machine and 2. GOMAXPROCS>1 and 3. there is at least one other running P and 4. local runq is empty. As opposed to runtime mutex we don't do passive spinning, because there can be work on the global runq or on other Ps. benchmark old ns/op new ns/op delta BenchmarkMutexNoSpin 1271 1272 +0.08% BenchmarkMutexNoSpin-2 702 683 -2.71% BenchmarkMutexNoSpin-4 377 372 -1.33% BenchmarkMutexNoSpin-8 197 190 -3.55% BenchmarkMutexNoSpin-16 131 122 -6.87% BenchmarkMutexNoSpin-32 170 164 -3.53% BenchmarkMutexSpin 4724 4728 +0.08% BenchmarkMutexSpin-2 2501 2491 -0.40% BenchmarkMutexSpin-4 1330 1325 -0.38% BenchmarkMutexSpin-8 684 684 +0.00% BenchmarkMutexSpin-16 414 372 -10.14% BenchmarkMutexSpin-32 559 469 -16.10% BenchmarkMutex 19.1 19.1 +0.00% BenchmarkMutex-2 81.6 54.3 -33.46% BenchmarkMutex-4 143 100 -30.07% BenchmarkMutex-8 154 156 +1.30% BenchmarkMutex-16 140 159 +13.57% BenchmarkMutex-32 141 163 +15.60% BenchmarkMutexSlack 33.3 31.2 -6.31% BenchmarkMutexSlack-2 122 97.7 -19.92% BenchmarkMutexSlack-4 168 158 -5.95% BenchmarkMutexSlack-8 152 158 +3.95% BenchmarkMutexSlack-16 140 159 +13.57% BenchmarkMutexSlack-32 146 162 +10.96% BenchmarkMutexWork 154 154 +0.00% BenchmarkMutexWork-2 89.2 89.9 +0.78% BenchmarkMutexWork-4 139 86.1 -38.06% BenchmarkMutexWork-8 177 162 -8.47% BenchmarkMutexWork-16 170 173 +1.76% BenchmarkMutexWork-32 176 176 +0.00% BenchmarkMutexWorkSlack 160 160 +0.00% BenchmarkMutexWorkSlack-2 103 99.1 -3.79% BenchmarkMutexWorkSlack-4 155 148 -4.52% BenchmarkMutexWorkSlack-8 176 170 -3.41% BenchmarkMutexWorkSlack-16 170 173 +1.76% 
BenchmarkMutexWorkSlack-32 175 176 +0.57% "No work" benchmarks are not very interesting (BenchmarkMutex and BenchmarkMutexSlack), as they are absolutely not realistic. Fixes #8889 Change-Id: I6f14f42af1fa48f73a776fdd11f0af6dd2bb428b Reviewed-on: https://go-review.googlesource.com/5430 Reviewed-by: Rick Hudson <rlh@golang.org> Run-TryBot: Dmitry Vyukov <dvyukov@google.com>
127 lines
3.1 KiB
Go
127 lines
3.1 KiB
Go
// Copyright 2009 The Go Authors. All rights reserved.
|
|
// Use of this source code is governed by a BSD-style
|
|
// license that can be found in the LICENSE file.
|
|
|
|
// Package sync provides basic synchronization primitives such as mutual
|
|
// exclusion locks. Other than the Once and WaitGroup types, most are intended
|
|
// for use by low-level library routines. Higher-level synchronization is
|
|
// better done via channels and communication.
|
|
//
|
|
// Values containing the types defined in this package should not be copied.
|
|
package sync
|
|
|
|
import (
|
|
"sync/atomic"
|
|
"unsafe"
|
|
)
|
|
|
|
// A Mutex is a mutual exclusion lock.
// Mutexes can be created as part of other structures;
// the zero value for a Mutex is an unlocked mutex.
type Mutex struct {
	// state packs the mutex state into a single word: the low bits hold
	// mutexLocked and mutexWoken, and the remaining bits (above
	// mutexWaiterShift) count goroutines blocked in runtime_Semacquire.
	// All transitions are performed with atomic CAS/Add in Lock/Unlock.
	state int32
	// sema is the runtime semaphore used to park (runtime_Semacquire)
	// and wake (runtime_Semrelease) goroutines waiting on this mutex.
	sema uint32
}
|
|
|
|
// A Locker represents an object that can be locked and unlocked.
type Locker interface {
	// Lock acquires the lock.
	Lock()
	// Unlock releases the lock.
	Unlock()
}
|
|
|
|
const (
	mutexLocked = 1 << iota // mutex is locked
	// mutexWoken indicates that some goroutine is awake (spinning or just
	// woken); while it is set, Unlock does not need to wake another waiter.
	mutexWoken
	// mutexWaiterShift is the bit offset of the waiter count within state;
	// state>>mutexWaiterShift is the number of parked goroutines.
	mutexWaiterShift = iota
)
|
|
|
|
// Lock locks m.
// If the lock is already in use, the calling goroutine
// blocks until the mutex is available.
func (m *Mutex) Lock() {
	// Fast path: grab unlocked mutex.
	if atomic.CompareAndSwapInt32(&m.state, 0, mutexLocked) {
		if raceenabled {
			raceAcquire(unsafe.Pointer(m))
		}
		return
	}

	// Slow path: CAS loop over m.state. Each iteration recomputes the
	// desired new state from a fresh snapshot and retries on CAS failure.
	awoke := false // true once this goroutine owns the mutexWoken flag
	iter := 0      // consecutive spin iterations, passed to runtime_canSpin
	for {
		old := m.state
		new := old | mutexLocked
		if old&mutexLocked != 0 {
			// Mutex is held by someone else. Spin a little before
			// parking, if the runtime says spinning is profitable.
			if runtime_canSpin(iter) {
				// Active spinning makes sense.
				// Try to set mutexWoken flag to inform Unlock
				// to not wake other blocked goroutines.
				// Only do so if there are parked waiters and the
				// flag isn't already claimed by another goroutine.
				if !awoke && old&mutexWoken == 0 && old>>mutexWaiterShift != 0 &&
					atomic.CompareAndSwapInt32(&m.state, old, old|mutexWoken) {
					awoke = true
				}
				runtime_doSpin()
				iter++
				continue
			}
			// Done spinning: register as a waiter before parking.
			new = old + 1<<mutexWaiterShift
		}
		if awoke {
			// The goroutine has been woken from sleep,
			// so we need to reset the flag in either case.
			if new&mutexWoken == 0 {
				panic("sync: inconsistent mutex state")
			}
			new &^= mutexWoken
		}
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			if old&mutexLocked == 0 {
				// We set mutexLocked ourselves: lock acquired.
				break
			}
			// Lock still held: we incremented the waiter count
			// above, so park until Unlock releases the semaphore.
			runtime_Semacquire(&m.sema)
			// Woken by Unlock; we now hold the implicit mutexWoken
			// claim and must clear it on the next state update.
			awoke = true
			iter = 0
		}
	}

	if raceenabled {
		raceAcquire(unsafe.Pointer(m))
	}
}
|
|
|
|
// Unlock unlocks m.
// It is a run-time error if m is not locked on entry to Unlock.
//
// A locked Mutex is not associated with a particular goroutine.
// It is allowed for one goroutine to lock a Mutex and then
// arrange for another goroutine to unlock it.
func (m *Mutex) Unlock() {
	if raceenabled {
		_ = m.state // touch state so the race detector sees the access
		raceRelease(unsafe.Pointer(m))
	}

	// Fast path: drop lock bit.
	new := atomic.AddInt32(&m.state, -mutexLocked)
	// If the locked bit was not set before the Add, the subtraction
	// borrowed into the other bits — the mutex was not locked.
	if (new+mutexLocked)&mutexLocked == 0 {
		panic("sync: unlock of unlocked mutex")
	}

	// Slow path: try to hand the mutex to a parked waiter, if any.
	old := new
	for {
		// If there are no waiters or a goroutine has already
		// been woken or grabbed the lock, no need to wake anyone.
		if old>>mutexWaiterShift == 0 || old&(mutexLocked|mutexWoken) != 0 {
			return
		}
		// Grab the right to wake someone.
		// Decrement the waiter count and set mutexWoken in one CAS so
		// no other Unlock wakes a second goroutine concurrently.
		new = (old - 1<<mutexWaiterShift) | mutexWoken
		if atomic.CompareAndSwapInt32(&m.state, old, new) {
			runtime_Semrelease(&m.sema)
			return
		}
		// CAS lost a race with Lock/another Unlock; reload and retry.
		old = m.state
	}
}
|