From ec0c9f270e8c09b00df5e45d87cfa4c85df63271 Mon Sep 17 00:00:00 2001
From: Dmitriy Vyukov <dvyukov@google.com>
Date: Tue, 25 Feb 2014 14:39:12 +0400
Subject: [PATCH] sync: use RunParallel in benchmarks

LGTM=bradfitz
R=golang-codereviews, bradfitz
CC=golang-codereviews
https://golang.org/cl/68050043
---
 src/pkg/sync/mutex_test.go        |  74 +++++++---------
 src/pkg/sync/once_test.go         |  25 ++------
 src/pkg/sync/pool_test.go         |  48 +++++---------
 src/pkg/sync/runtime_sema_test.go |  85 +++++++++-------
 src/pkg/sync/rwmutex_test.go      |  79 ++++++++---------
 src/pkg/sync/waitgroup_test.go    | 101 +++++++++---------
 6 files changed, 126 insertions(+), 286 deletions(-)

diff --git a/src/pkg/sync/mutex_test.go b/src/pkg/sync/mutex_test.go
index bf78c6f609c..151b25c10fc 100644
--- a/src/pkg/sync/mutex_test.go
+++ b/src/pkg/sync/mutex_test.go
@@ -9,7 +9,6 @@ package sync_test
 import (
 	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
 
@@ -90,63 +89,34 @@ func BenchmarkMutexUncontended(b *testing.B) {
 		Mutex
 		pad [128]uint8
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			var mu PaddedMutex
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					mu.Lock()
-					mu.Unlock()
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		var mu PaddedMutex
+		for pb.Next() {
+			mu.Lock()
+			mu.Unlock()
+		}
+	})
 }
 
 func benchmarkMutex(b *testing.B, slack, work bool) {
-	const (
-		CallsPerSched  = 1000
-		LocalWork      = 100
-		GoroutineSlack = 10
-	)
-	procs := runtime.GOMAXPROCS(-1)
-	if slack {
-		procs *= GoroutineSlack
-	}
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var mu Mutex
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					mu.Lock()
-					mu.Unlock()
-					if work {
-						for i := 0; i < LocalWork; i++ {
-							foo *= 2
-							foo /= 2
-						}
-					}
+	if slack {
+		b.SetParallelism(10)
+	}
+	b.RunParallel(func(pb *testing.PB) {
+		foo := 0
+		for pb.Next() {
+			mu.Lock()
+			mu.Unlock()
+			if work {
+				for i := 0; i < 100; i++ {
+					foo *= 2
+					foo /= 2
 				}
 			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+		}
+		_ = foo
+	})
 }
 
 func BenchmarkMutex(b *testing.B) {
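The hunk above is the heart of the whole change: b.RunParallel starts GOMAXPROCS worker goroutines (scaled up by b.SetParallelism, which is how benchmarkMutex preserves the old GoroutineSlack=10 oversubscription) and hands the b.N iterations out through pb.Next, replacing the hand-rolled CallsPerSched batching, atomic countdown, and completion channel. A minimal self-contained sketch of the pattern follows; the benchmark name is illustrative and not part of the patch.

package sync_test

import (
	"sync"
	"testing"
)

// Illustrative only: RunParallel distributes the b.N iterations among
// its worker goroutines, and pb.Next reports whether this goroutine
// should execute another iteration.
func BenchmarkLockUnlock(b *testing.B) {
	var mu sync.Mutex
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			mu.Lock()
			mu.Unlock()
		}
	})
}

Run with, for example, go test -bench=. -cpu=1,2,4 to see how the same benchmark behaves across GOMAXPROCS values.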
"sync" - "sync/atomic" "testing" ) @@ -62,24 +60,11 @@ func TestOncePanic(t *testing.T) { } func BenchmarkOnce(b *testing.B) { - const CallsPerSched = 1000 - procs := runtime.GOMAXPROCS(-1) - N := int32(b.N / CallsPerSched) var once Once f := func() {} - c := make(chan bool, procs) - for p := 0; p < procs; p++ { - go func() { - for atomic.AddInt32(&N, -1) >= 0 { - runtime.Gosched() - for g := 0; g < CallsPerSched; g++ { - once.Do(f) - } - } - c <- true - }() - } - for p := 0; p < procs; p++ { - <-c - } + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + once.Do(f) + } + }) } diff --git a/src/pkg/sync/pool_test.go b/src/pkg/sync/pool_test.go index 7e02f69d6c3..a34719ab2ce 100644 --- a/src/pkg/sync/pool_test.go +++ b/src/pkg/sync/pool_test.go @@ -128,42 +128,24 @@ func TestPoolStress(t *testing.T) { func BenchmarkPool(b *testing.B) { var p Pool - var wg WaitGroup - n0 := uintptr(b.N) - n := n0 - for i := 0; i < runtime.GOMAXPROCS(0); i++ { - wg.Add(1) - go func() { - defer wg.Done() - for atomic.AddUintptr(&n, ^uintptr(0)) < n0 { - for b := 0; b < 100; b++ { - p.Put(1) - p.Get() - } - } - }() - } - wg.Wait() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + p.Put(1) + p.Get() + } + }) } func BenchmarkPoolOverlflow(b *testing.B) { var p Pool - var wg WaitGroup - n0 := uintptr(b.N) - n := n0 - for i := 0; i < runtime.GOMAXPROCS(0); i++ { - wg.Add(1) - go func() { - defer wg.Done() - for atomic.AddUintptr(&n, ^uintptr(0)) < n0 { - for b := 0; b < 100; b++ { - p.Put(1) - } - for b := 0; b < 100; b++ { - p.Get() - } + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + for b := 0; b < 100; b++ { + p.Put(1) } - }() - } - wg.Wait() + for b := 0; b < 100; b++ { + p.Get() + } + } + }) } diff --git a/src/pkg/sync/runtime_sema_test.go b/src/pkg/sync/runtime_sema_test.go index 57a8dbee783..5b7dd3df3f0 100644 --- a/src/pkg/sync/runtime_sema_test.go +++ b/src/pkg/sync/runtime_sema_test.go @@ -7,7 +7,6 @@ package sync_test import ( "runtime" . 
"sync" - "sync/atomic" "testing" ) @@ -16,72 +15,44 @@ func BenchmarkSemaUncontended(b *testing.B) { sem uint32 pad [32]uint32 } - const CallsPerSched = 1000 - procs := runtime.GOMAXPROCS(-1) - N := int32(b.N / CallsPerSched) - c := make(chan bool, procs) - for p := 0; p < procs; p++ { - go func() { - sem := new(PaddedSem) - for atomic.AddInt32(&N, -1) >= 0 { - runtime.Gosched() - for g := 0; g < CallsPerSched; g++ { - Runtime_Semrelease(&sem.sem) - Runtime_Semacquire(&sem.sem) - } - } - c <- true - }() - } - for p := 0; p < procs; p++ { - <-c - } + b.RunParallel(func(pb *testing.PB) { + sem := new(PaddedSem) + for pb.Next() { + Runtime_Semrelease(&sem.sem) + Runtime_Semacquire(&sem.sem) + } + }) } func benchmarkSema(b *testing.B, block, work bool) { - const CallsPerSched = 1000 - const LocalWork = 100 - procs := runtime.GOMAXPROCS(-1) - N := int32(b.N / CallsPerSched) - c := make(chan bool, procs) - c2 := make(chan bool, procs/2) sem := uint32(0) if block { - for p := 0; p < procs/2; p++ { - go func() { - Runtime_Semacquire(&sem) - c2 <- true - }() - } - } - for p := 0; p < procs; p++ { + done := make(chan bool) go func() { - foo := 0 - for atomic.AddInt32(&N, -1) >= 0 { - runtime.Gosched() - for g := 0; g < CallsPerSched; g++ { - Runtime_Semrelease(&sem) - if work { - for i := 0; i < LocalWork; i++ { - foo *= 2 - foo /= 2 - } - } - Runtime_Semacquire(&sem) - } + for p := 0; p < runtime.GOMAXPROCS(0)/2; p++ { + Runtime_Semacquire(&sem) } - c <- foo == 42 - Runtime_Semrelease(&sem) + done <- true + }() + defer func() { + <-done }() } - if block { - for p := 0; p < procs/2; p++ { - <-c2 + b.RunParallel(func(pb *testing.PB) { + foo := 0 + for pb.Next() { + Runtime_Semrelease(&sem) + if work { + for i := 0; i < 100; i++ { + foo *= 2 + foo /= 2 + } + } + Runtime_Semacquire(&sem) } - } - for p := 0; p < procs; p++ { - <-c - } + _ = foo + Runtime_Semrelease(&sem) + }) } func BenchmarkSemaSyntNonblock(b *testing.B) { diff --git a/src/pkg/sync/rwmutex_test.go b/src/pkg/sync/rwmutex_test.go index 39d5d6540de..0436f97239c 100644 --- a/src/pkg/sync/rwmutex_test.go +++ b/src/pkg/sync/rwmutex_test.go @@ -160,64 +160,39 @@ func BenchmarkRWMutexUncontended(b *testing.B) { RWMutex pad [32]uint32 } - const CallsPerSched = 1000 - procs := runtime.GOMAXPROCS(-1) - N := int32(b.N / CallsPerSched) - c := make(chan bool, procs) - for p := 0; p < procs; p++ { - go func() { - var rwm PaddedRWMutex - for atomic.AddInt32(&N, -1) >= 0 { - runtime.Gosched() - for g := 0; g < CallsPerSched; g++ { - rwm.RLock() - rwm.RLock() - rwm.RUnlock() - rwm.RUnlock() - rwm.Lock() - rwm.Unlock() - } - } - c <- true - }() - } - for p := 0; p < procs; p++ { - <-c - } + b.RunParallel(func(pb *testing.PB) { + var rwm PaddedRWMutex + for pb.Next() { + rwm.RLock() + rwm.RLock() + rwm.RUnlock() + rwm.RUnlock() + rwm.Lock() + rwm.Unlock() + } + }) } func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) { - const CallsPerSched = 1000 - procs := runtime.GOMAXPROCS(-1) - N := int32(b.N / CallsPerSched) - c := make(chan bool, procs) var rwm RWMutex - for p := 0; p < procs; p++ { - go func() { - foo := 0 - for atomic.AddInt32(&N, -1) >= 0 { - runtime.Gosched() - for g := 0; g < CallsPerSched; g++ { - foo++ - if foo%writeRatio == 0 { - rwm.Lock() - rwm.Unlock() - } else { - rwm.RLock() - for i := 0; i != localWork; i += 1 { - foo *= 2 - foo /= 2 - } - rwm.RUnlock() - } + b.RunParallel(func(pb *testing.PB) { + foo := 0 + for pb.Next() { + foo++ + if foo%writeRatio == 0 { + rwm.Lock() + rwm.Unlock() + } else { + rwm.RLock() + for i := 0; 
diff --git a/src/pkg/sync/waitgroup_test.go b/src/pkg/sync/waitgroup_test.go
index 84c4cfc37a3..0cbd51056a7 100644
--- a/src/pkg/sync/waitgroup_test.go
+++ b/src/pkg/sync/waitgroup_test.go
@@ -5,9 +5,7 @@
 package sync_test
 
 import (
-	"runtime"
 	. "sync"
-	"sync/atomic"
 	"testing"
 )
 
@@ -66,55 +64,30 @@ func BenchmarkWaitGroupUncontended(b *testing.B) {
 		WaitGroup
 		pad [128]uint8
 	}
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
-	for p := 0; p < procs; p++ {
-		go func() {
-			var wg PaddedWaitGroup
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					wg.Add(1)
-					wg.Done()
-					wg.Wait()
-				}
-			}
-			c <- true
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+	b.RunParallel(func(pb *testing.PB) {
+		var wg PaddedWaitGroup
+		for pb.Next() {
+			wg.Add(1)
+			wg.Done()
+			wg.Wait()
+		}
+	})
 }
 
 func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var wg WaitGroup
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					wg.Add(1)
-					for i := 0; i < localWork; i++ {
-						foo *= 2
-						foo /= 2
-					}
-					wg.Done()
-				}
+	b.RunParallel(func(pb *testing.PB) {
+		foo := 0
+		for pb.Next() {
+			wg.Add(1)
+			for i := 0; i < localWork; i++ {
+				foo *= 2
+				foo /= 2
 			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+			wg.Done()
+		}
+		_ = foo
+	})
 }
 
 func BenchmarkWaitGroupAddDone(b *testing.B) {
@@ -126,34 +99,18 @@ func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
 }
 
 func benchmarkWaitGroupWait(b *testing.B, localWork int) {
-	const CallsPerSched = 1000
-	procs := runtime.GOMAXPROCS(-1)
-	N := int32(b.N / CallsPerSched)
-	c := make(chan bool, procs)
 	var wg WaitGroup
-	wg.Add(procs)
-	for p := 0; p < procs; p++ {
-		go wg.Done()
-	}
-	for p := 0; p < procs; p++ {
-		go func() {
-			foo := 0
-			for atomic.AddInt32(&N, -1) >= 0 {
-				runtime.Gosched()
-				for g := 0; g < CallsPerSched; g++ {
-					wg.Wait()
-					for i := 0; i < localWork; i++ {
-						foo *= 2
-						foo /= 2
-					}
-				}
+	b.RunParallel(func(pb *testing.PB) {
+		foo := 0
+		for pb.Next() {
+			wg.Wait()
+			for i := 0; i < localWork; i++ {
+				foo *= 2
+				foo /= 2
 			}
-			c <- foo == 42
-		}()
-	}
-	for p := 0; p < procs; p++ {
-		<-c
-	}
+		}
+		_ = foo
+	})
 }
 
 func BenchmarkWaitGroupWait(b *testing.B) {
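Across all six files the same scaffolding disappears: the CallsPerSched batching constants, the runtime.Gosched calls, the atomic iteration countdown, and the completion channels. The `_ = foo` sinks take over the job of the old `c <- foo == 42` sends, which kept foo live so the compiler could not delete the local work. A rough approximation of what RunParallel does internally, under the simplifying assumption that iterations are handed out one at a time (the real testing.PB grabs them in batches to reduce contention on the shared counter):

package main

import (
	"runtime"
	"sync"
	"sync/atomic"
)

// runParallelSketch is an illustrative stand-in for b.RunParallel:
// it starts GOMAXPROCS goroutines and hands out n iterations through
// a shared atomic counter, then waits for all workers to drain it.
func runParallelSketch(n int64, body func(next func() bool)) {
	procs := runtime.GOMAXPROCS(0)
	var counter int64
	var wg sync.WaitGroup
	wg.Add(procs)
	for i := 0; i < procs; i++ {
		go func() {
			defer wg.Done()
			body(func() bool {
				// Claim one more iteration; stop once n are taken.
				return atomic.AddInt64(&counter, 1) <= n
			})
		}()
	}
	wg.Wait()
}

func main() {
	runParallelSketch(1000, func(next func() bool) {
		for next() {
			// benchmark body goes here
		}
	})
}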