mirror of https://github.com/golang/go
commit f3b5a2bc19

Currently only the rwmutex write lock prevents descheduling; the read
lock does not. This leads to the following situation:

1. A reader acquires the lock and gets descheduled.
2. GOMAXPROCS writers attempt to acquire the lock (or at least one
   writer does, followed by readers). This blocks all of the Ps.
3. There is no 3. The descheduled reader never gets to run again
   because there are no Ps, so it never releases the lock and the
   system deadlocks.

Fix this by preventing descheduling while holding the read lock. This
also requires rewriting TestParallelRWMutexReaders to always create
enough GOMAXPROCS and to use non-blocking operations for
synchronization.

Fixes #20903.

Change-Id: Ibd460663a7e5a555be5490e13b2eaaa295fac39f
Reviewed-on: https://go-review.googlesource.com/47632
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
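
(For context: the fix itself lives in runtime/rwmutex.go. The sketch
below illustrates its shape and is not the exact diff. Read-lock
acquisition pins the calling goroutine to its M via the runtime's
acquirem, which disables preemption until the matching releasem in
runlock, so a reader can no longer be descheduled while holding the
lock.)

// Illustrative sketch only; the reader-count and parking logic is elided.
func (rw *rwmutex) rlock() {
	// Pin the reader to its M so it cannot be descheduled while
	// holding the read lock; otherwise writers could consume all
	// of the Ps and deadlock, as described above.
	acquirem()
	// ... add a reader; if a writer is pending, wait for it ...
}

func (rw *rwmutex) runlock() {
	// ... drop a reader; if it is the last, wake a pending writer ...
	releasem(getg().m)
}
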
179 lines · 3.7 KiB · Go

// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

// This is a copy of sync/rwmutex_test.go rewritten to test the
// runtime rwmutex.

package runtime_test

import (
	"fmt"
	. "runtime"
	"sync/atomic"
	"testing"
)

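// parallelReader acquires the read lock, signals readiness on clocked
// (buffered, so the send cannot block), and spins on cunlock before
// unlocking. It must not block while holding the read lock, since the
// fix above pins a read-locking goroutine to its M.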
func parallelReader(m *RWMutex, clocked chan bool, cunlock *uint32, cdone chan bool) {
	m.RLock()
	clocked <- true
	for atomic.LoadUint32(cunlock) == 0 {
	}
	m.RUnlock()
	cdone <- true
}

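// doTestParallelReaders verifies that numReaders goroutines can hold
// the read lock at the same time. GOMAXPROCS is raised to numReaders+1
// so every spinning reader gets its own P, with one left over for the
// test goroutine.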
func doTestParallelReaders(numReaders int) {
	GOMAXPROCS(numReaders + 1)
	var m RWMutex
	clocked := make(chan bool, numReaders)
	var cunlock uint32
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, &cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	atomic.StoreUint32(&cunlock, 1)
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

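// TestParallelRWMutexReaders is the regression test for issue #20903:
// multiple readers must be able to hold the lock simultaneously
// without deadlocking the scheduler.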
func TestParallelRWMutexReaders(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	doTestParallelReaders(1)
	doTestParallelReaders(3)
	doTestParallelReaders(4)
}

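// reader repeatedly takes the read lock and uses the shared activity
// counter to check that no writer (which adds 10000) is inside the
// lock at the same time; the empty inner loop just lengthens the
// critical section.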
func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

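// writer repeatedly takes the write lock and checks that it holds the
// lock exclusively: activity must be exactly 10000, meaning no readers
// and no other writer are inside the lock.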
func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

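// HammerRWMutex stresses the lock with two writers and numReaders
// readers, starting one writer before the readers and one in the
// middle so writers contend with readers on both sides.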
func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
	GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	cdone := make(chan bool)
	go writer(&rwm, num_iterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	go writer(&rwm, num_iterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

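// TestRWMutex hammers the lock across several GOMAXPROCS values and
// reader counts.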
func TestRWMutex(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

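// BenchmarkRWMutexUncontended measures uncontended lock overhead. Each
// goroutine gets its own mutex, padded so that distinct mutexes do not
// share a cache line (avoiding false sharing).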
func BenchmarkRWMutexUncontended(b *testing.B) {
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	b.RunParallel(func(pb *testing.PB) {
		var rwm PaddedRWMutex
		for pb.Next() {
			rwm.RLock()
			rwm.RLock()
			rwm.RUnlock()
			rwm.RUnlock()
			rwm.Lock()
			rwm.Unlock()
		}
	})
}

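// benchmarkRWMutex measures a mixed workload: one write acquisition
// per writeRatio operations, with localWork loop iterations performed
// under each read lock.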
func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	var rwm RWMutex
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if foo%writeRatio == 0 {
				rwm.Lock()
				rwm.Unlock()
			} else {
				rwm.RLock()
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
				rwm.RUnlock()
			}
		}
		_ = foo
	})
}

func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}

func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}

func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}

func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}