Commit 80832974ac
Currently runtime.rwmutex is written to block the calling goroutine rather than the calling thread. However, rwmutex was intended to be used in the scheduler, which means it needs to be a thread-level synchronization primitive. Hence, this modifies rwmutex to synchronize threads instead of goroutines. This has the consequence of making it write-barrier-free, which is also important for using it in the scheduler.

The implementation makes three changes: it replaces the "w" semaphore with a mutex, since this was all it was being used for anyway; it replaces "writerSem" with a single pending M that parks on its note; and it replaces "readerSem" with a list of Ms that park on their notes plus a pass count that together emulate a counting semaphore.

I model-checked the safety and liveness of this implementation through >1 billion schedules.

For #20738.

Change-Id: I3cf5a18c266a96a3f38165083812803510217787
Reviewed-on: https://go-review.googlesource.com/47071
Run-TryBot: Austin Clements <austin@google.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Ian Lance Taylor <iant@golang.org>
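The reader-side mechanism described above (a list of parked waiters plus a pass count standing in for a counting semaphore) can be illustrated with a small user-level Go sketch. This is not the runtime implementation: it uses channels in place of Ms parking on their notes, and the names (parkSemaphore, acquire, release) are illustrative only, not taken from runtime/rwmutex.go.

package main

import "sync"

// parkSemaphore is a user-level stand-in for the readerSem replacement
// described in the commit message: a list of parked waiters plus a pass
// count that together emulate a counting semaphore. In the runtime the
// waiters are Ms sleeping on their notes; here each waiter parks on its
// own channel. All names are illustrative.
type parkSemaphore struct {
	mu      sync.Mutex
	pass    int             // wakeups granted while no one was parked
	waiters []chan struct{} // parked waiters, oldest first
}

// acquire consumes one wakeup, parking the caller if none is available yet.
func (s *parkSemaphore) acquire() {
	s.mu.Lock()
	if s.pass > 0 {
		// A wakeup was already banked; take it without parking.
		s.pass--
		s.mu.Unlock()
		return
	}
	// Park on a fresh channel (the analogue of an M sleeping on its note).
	wake := make(chan struct{})
	s.waiters = append(s.waiters, wake)
	s.mu.Unlock()
	<-wake
}

// release grants one wakeup: it unparks the oldest waiter if there is one,
// and otherwise banks the wakeup in the pass count for a future acquirer.
func (s *parkSemaphore) release() {
	s.mu.Lock()
	if len(s.waiters) > 0 {
		wake := s.waiters[0]
		s.waiters = s.waiters[1:]
		s.mu.Unlock()
		close(wake)
		return
	}
	s.pass++
	s.mu.Unlock()
}

func main() {
	var s parkSemaphore
	done := make(chan struct{})
	go func() {
		s.acquire() // blocks until the release below (or consumes a banked pass)
		close(done)
	}()
	s.release()
	<-done
}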
180 lines · 3.7 KiB · Go
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// GOMAXPROCS=10 go test

// This is a copy of sync/rwmutex_test.go rewritten to test the
// runtime rwmutex.

package runtime_test

import (
	"fmt"
	. "runtime"
	"sync/atomic"
	"testing"
)

func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
	m.RLock()
	clocked <- true
	<-cunlock
	m.RUnlock()
	cdone <- true
}

func doTestParallelReaders(numReaders, gomaxprocs int) {
	GOMAXPROCS(gomaxprocs)
	var m RWMutex
	clocked := make(chan bool)
	cunlock := make(chan bool)
	cdone := make(chan bool)
	for i := 0; i < numReaders; i++ {
		go parallelReader(&m, clocked, cunlock, cdone)
	}
	// Wait for all parallel RLock()s to succeed.
	for i := 0; i < numReaders; i++ {
		<-clocked
	}
	for i := 0; i < numReaders; i++ {
		cunlock <- true
	}
	// Wait for the goroutines to finish.
	for i := 0; i < numReaders; i++ {
		<-cdone
	}
}

func TestParallelRWMutexReaders(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	doTestParallelReaders(1, 4)
	doTestParallelReaders(3, 4)
	doTestParallelReaders(4, 2)
}

func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.RLock()
		n := atomic.AddInt32(activity, 1)
		if n < 1 || n >= 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -1)
		rwm.RUnlock()
	}
	cdone <- true
}

func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
	for i := 0; i < num_iterations; i++ {
		rwm.Lock()
		n := atomic.AddInt32(activity, 10000)
		if n != 10000 {
			panic(fmt.Sprintf("wlock(%d)\n", n))
		}
		for i := 0; i < 100; i++ {
		}
		atomic.AddInt32(activity, -10000)
		rwm.Unlock()
	}
	cdone <- true
}

func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
	GOMAXPROCS(gomaxprocs)
	// Number of active readers + 10000 * number of active writers.
	var activity int32
	var rwm RWMutex
	cdone := make(chan bool)
	go writer(&rwm, num_iterations, &activity, cdone)
	var i int
	for i = 0; i < numReaders/2; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	go writer(&rwm, num_iterations, &activity, cdone)
	for ; i < numReaders; i++ {
		go reader(&rwm, num_iterations, &activity, cdone)
	}
	// Wait for the 2 writers and all readers to finish.
	for i := 0; i < 2+numReaders; i++ {
		<-cdone
	}
}

func TestRWMutex(t *testing.T) {
	defer GOMAXPROCS(GOMAXPROCS(-1))
	n := 1000
	if testing.Short() {
		n = 5
	}
	HammerRWMutex(1, 1, n)
	HammerRWMutex(1, 3, n)
	HammerRWMutex(1, 10, n)
	HammerRWMutex(4, 1, n)
	HammerRWMutex(4, 3, n)
	HammerRWMutex(4, 10, n)
	HammerRWMutex(10, 1, n)
	HammerRWMutex(10, 3, n)
	HammerRWMutex(10, 10, n)
	HammerRWMutex(10, 5, n)
}

func BenchmarkRWMutexUncontended(b *testing.B) {
	type PaddedRWMutex struct {
		RWMutex
		pad [32]uint32
	}
	b.RunParallel(func(pb *testing.PB) {
		var rwm PaddedRWMutex
		for pb.Next() {
			rwm.RLock()
			rwm.RLock()
			rwm.RUnlock()
			rwm.RUnlock()
			rwm.Lock()
			rwm.Unlock()
		}
	})
}

func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
	var rwm RWMutex
	b.RunParallel(func(pb *testing.PB) {
		foo := 0
		for pb.Next() {
			foo++
			if foo%writeRatio == 0 {
				rwm.Lock()
				rwm.Unlock()
			} else {
				rwm.RLock()
				for i := 0; i != localWork; i += 1 {
					foo *= 2
					foo /= 2
				}
				rwm.RUnlock()
			}
		}
		_ = foo
	})
}

func BenchmarkRWMutexWrite100(b *testing.B) {
	benchmarkRWMutex(b, 0, 100)
}

func BenchmarkRWMutexWrite10(b *testing.B) {
	benchmarkRWMutex(b, 0, 10)
}

func BenchmarkRWMutexWorkWrite100(b *testing.B) {
	benchmarkRWMutex(b, 100, 100)
}

func BenchmarkRWMutexWorkWrite10(b *testing.B) {
	benchmarkRWMutex(b, 100, 10)
}