
sync: use RunParallel in benchmarks

LGTM=bradfitz
R=golang-codereviews, bradfitz
CC=golang-codereviews
https://golang.org/cl/68050043
Dmitriy Vyukov 2014-02-25 14:39:12 +04:00
parent 1c9861918b
commit ec0c9f270e
6 changed files with 126 additions and 286 deletions
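For context, a minimal standalone sketch (not part of the commit) of the testing.B.RunParallel pattern this CL switches the sync benchmarks to: RunParallel runs the benchmark body on GOMAXPROCS goroutines (multiplied by any SetParallelism factor) and hands out the b.N iterations through pb.Next, replacing the hand-rolled CallsPerSched batching, atomic counters, and channel joins deleted below. The benchmark name and file placement here are hypothetical.

package sync_test

import (
	"sync"
	"testing"
)

// BenchmarkMutexRunParallel is a hypothetical example, not part of this CL.
func BenchmarkMutexRunParallel(b *testing.B) {
	var mu sync.Mutex
	// Optional: run 10 goroutines per GOMAXPROCS, as benchmarkMutex does for its slack case.
	b.SetParallelism(10)
	b.RunParallel(func(pb *testing.PB) {
		// pb.Next reports whether there are more iterations for this goroutine to run.
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
		}
	})
}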


@@ -9,7 +9,6 @@ package sync_test
 import (
 	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
 
@@ -90,63 +89,34 @@ func BenchmarkMutexUncontended(b *testing.B) {
 		Mutex
 		pad [128]uint8
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			var mu PaddedMutex
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					mu.Lock()
-					mu.Unlock()
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		var mu PaddedMutex
+		for pb.Next() {
+			mu.Lock()
+			mu.Unlock()
+		}
+	})
 }
 
 func benchmarkMutex(b *testing.B, slack, work bool) {
-	const (
-		CallsPerSched  = 1000
-		LocalWork      = 100
-		GoroutineSlack = 10
-	)
-	procs := runtime.GOMAXPROCS(-1)
-	if slack {
-		procs *= GoroutineSlack
-	}
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var mu Mutex
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					mu.Lock()
-					mu.Unlock()
-					if work {
-						for i := 0; i < LocalWork; i++ {
-							foo *= 2
-							foo /= 2
-						}
-					}
-				}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	if slack {
+		b.SetParallelism(10)
+	}
+	b.RunParallel(func(pb *testing.PB) {
+		foo := 0
+		for pb.Next() {
+			mu.Lock()
+			mu.Unlock()
+			if work {
+				for i := 0; i < 100; i++ {
+					foo *= 2
+					foo /= 2
+				}
+			}
+		}
+		_ = foo
+	})
 }
 
 func BenchmarkMutex(b *testing.B) {


@@ -5,9 +5,7 @@
 package sync_test
 
 import (
-	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
 
@@ -62,24 +60,11 @@ func TestOncePanic(t *testing.T) {
 }
 
 func BenchmarkOnce(b *testing.B) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
 	var once Once
 	f := func() {}
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					once.Do(f)
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			once.Do(f)
+		}
+	})
 }


@@ -128,42 +128,24 @@ func TestPoolStress(t *testing.T) {
 
 func BenchmarkPool(b *testing.B) {
 	var p Pool
-	var wg WaitGroup
-	n0 := uintptr(b.N)
-	n := n0
-	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for atomic.AddUintptr(&n, ^uintptr(0)) < n0 {
-				for b := 0; b < 100; b++ {
-					p.Put(1)
-					p.Get()
-				}
-			}
-		}()
-	}
-	wg.Wait()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			p.Put(1)
+			p.Get()
+		}
+	})
 }
 
 func BenchmarkPoolOverlflow(b *testing.B) {
 	var p Pool
-	var wg WaitGroup
-	n0 := uintptr(b.N)
-	n := n0
-	for i := 0; i < runtime.GOMAXPROCS(0); i++ {
-		wg.Add(1)
-		go func() {
-			defer wg.Done()
-			for atomic.AddUintptr(&n, ^uintptr(0)) < n0 {
-				for b := 0; b < 100; b++ {
-					p.Put(1)
-				}
-				for b := 0; b < 100; b++ {
-					p.Get()
-				}
-			}
-		}()
-	}
-	wg.Wait()
+	b.RunParallel(func(pb *testing.PB) {
+		for pb.Next() {
+			for b := 0; b < 100; b++ {
+				p.Put(1)
+			}
+			for b := 0; b < 100; b++ {
+				p.Get()
+			}
+		}
+	})
 }


@@ -7,7 +7,6 @@ package sync_test
 import (
 	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
 
@@ -16,72 +15,44 @@ func BenchmarkSemaUncontended(b *testing.B) {
 		sem uint32
 		pad [32]uint32
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			sem := new(PaddedSem)
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					Runtime_Semrelease(&sem.sem)
-					Runtime_Semacquire(&sem.sem)
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		sem := new(PaddedSem)
+		for pb.Next() {
+			Runtime_Semrelease(&sem.sem)
+			Runtime_Semacquire(&sem.sem)
+		}
+	})
 }
 
 func benchmarkSema(b *testing.B, block, work bool) {
-	const CallsPerSched = 1000
-	const LocalWork = 100
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	c2 := make(chan bool, procs/2)
 	sem := uint32(0)
 	if block {
-		for p := 0; p < procs/2; p++ {
-			go func() {
-				Runtime_Semacquire(&sem)
-				c2 <- true
-			}()
-		}
-	}
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					Runtime_Semrelease(&sem)
-					if work {
-						for i := 0; i < LocalWork; i++ {
-							foo *= 2
-							foo /= 2
-						}
-					}
-					Runtime_Semacquire(&sem)
-				}
-			}
-			c <- foo == 42
-			Runtime_Semrelease(&sem)
-		}()
-	}
-	if block {
-		for p := 0; p < procs/2; p++ {
-			<-c2
-		}
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+		done := make(chan bool)
+		go func() {
+			for p := 0; p < runtime.GOMAXPROCS(0)/2; p++ {
+				Runtime_Semacquire(&sem)
+			}
+			done <- true
+		}()
+		defer func() {
+			<-done
+		}()
+	}
+	b.RunParallel(func(pb *testing.PB) {
+		foo := 0
+		for pb.Next() {
+			Runtime_Semrelease(&sem)
+			if work {
+				for i := 0; i < 100; i++ {
+					foo *= 2
+					foo /= 2
+				}
+			}
+			Runtime_Semacquire(&sem)
+		}
+		_ = foo
+		Runtime_Semrelease(&sem)
+	})
 }
 
 func BenchmarkSemaSyntNonblock(b *testing.B) {


@@ -160,64 +160,39 @@ func BenchmarkRWMutexUncontended(b *testing.B) {
 		RWMutex
 		pad [32]uint32
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			var rwm PaddedRWMutex
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					rwm.RLock()
-					rwm.RLock()
-					rwm.RUnlock()
-					rwm.RUnlock()
-					rwm.Lock()
-					rwm.Unlock()
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		var rwm PaddedRWMutex
+		for pb.Next() {
+			rwm.RLock()
+			rwm.RLock()
+			rwm.RUnlock()
+			rwm.RUnlock()
+			rwm.Lock()
+			rwm.Unlock()
+		}
+	})
 }
 
 func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var rwm RWMutex
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					foo++
-					if foo%writeRatio == 0 {
-						rwm.Lock()
-						rwm.Unlock()
-					} else {
-						rwm.RLock()
-						for i := 0; i != localWork; i += 1 {
-							foo *= 2
-							foo /= 2
-						}
-						rwm.RUnlock()
-					}
-				}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		foo := 0
+		for pb.Next() {
+			foo++
+			if foo%writeRatio == 0 {
+				rwm.Lock()
+				rwm.Unlock()
+			} else {
+				rwm.RLock()
+				for i := 0; i != localWork; i += 1 {
+					foo *= 2
+					foo /= 2
+				}
+				rwm.RUnlock()
+			}
+		}
+		_ = foo
+	})
 }
 
 func BenchmarkRWMutexWrite100(b *testing.B) {


@@ -5,9 +5,7 @@
 package sync_test
 
 import (
-	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
 
@@ -66,55 +64,30 @@ func BenchmarkWaitGroupUncontended(b *testing.B) {
 		WaitGroup
 		pad [128]uint8
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			var wg PaddedWaitGroup
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					wg.Add(1)
-					wg.Done()
-					wg.Wait()
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		var wg PaddedWaitGroup
+		for pb.Next() {
+			wg.Add(1)
+			wg.Done()
+			wg.Wait()
+		}
+	})
 }
 
 func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var wg WaitGroup
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					wg.Add(1)
-					for i := 0; i < localWork; i++ {
-						foo *= 2
-						foo /= 2
-					}
-					wg.Done()
-				}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		foo := 0
+		for pb.Next() {
+			wg.Add(1)
+			for i := 0; i < localWork; i++ {
+				foo *= 2
+				foo /= 2
+			}
+			wg.Done()
+		}
+		_ = foo
+	})
 }
 
 func BenchmarkWaitGroupAddDone(b *testing.B) {
@@ -126,34 +99,18 @@ func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
 }
 
 func benchmarkWaitGroupWait(b *testing.B, localWork int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var wg WaitGroup
-	wg.Add(procs)
-	for p := 0; p < procs; p++ {
-		go wg.Done()
-	}
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					wg.Wait()
-					for i := 0; i < localWork; i++ {
-						foo *= 2
-						foo /= 2
-					}
-				}
-			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		foo := 0
+		for pb.Next() {
+			wg.Wait()
+			for i := 0; i < localWork; i++ {
+				foo *= 2
+				foo /= 2
+			}
+		}
+		_ = foo
+	})
 }
 
 func BenchmarkWaitGroupWait(b *testing.B) {