Mirror of https://github.com/golang/go, synced 2024-10-04 22:21:22 -06:00 (commit ee6e1a3ff7).
benchmark                          old ns/op  new ns/op  delta
BenchmarkWaitGroupUncontended          93.50      33.60  -64.06%
BenchmarkWaitGroupUncontended-2        44.30      16.90  -61.85%
BenchmarkWaitGroupUncontended-4        21.80       8.47  -61.15%
BenchmarkWaitGroupUncontended-8        12.10       4.86  -59.83%
BenchmarkWaitGroupUncontended-16        7.38       3.35  -54.61%
BenchmarkWaitGroupAddDone              58.40      33.70  -42.29%
BenchmarkWaitGroupAddDone-2           293.00      85.80  -70.72%
BenchmarkWaitGroupAddDone-4           243.00      51.10  -78.97%
BenchmarkWaitGroupAddDone-8           236.00      52.20  -77.88%
BenchmarkWaitGroupAddDone-16          215.00      43.30  -79.86%
BenchmarkWaitGroupAddDoneWork         826.00     794.00   -3.87%
BenchmarkWaitGroupAddDoneWork-2       450.00     424.00   -5.78%
BenchmarkWaitGroupAddDoneWork-4       277.00     220.00  -20.58%
BenchmarkWaitGroupAddDoneWork-8       440.00     116.00  -73.64%
BenchmarkWaitGroupAddDoneWork-16      569.00      66.50  -88.31%
BenchmarkWaitGroupWait                 29.00       8.04  -72.28%
BenchmarkWaitGroupWait-2               74.10       4.15  -94.40%
BenchmarkWaitGroupWait-4              117.00       2.30  -98.03%
BenchmarkWaitGroupWait-8              111.00       1.31  -98.82%
BenchmarkWaitGroupWait-16             104.00       1.27  -98.78%
BenchmarkWaitGroupWaitWork            802.00     792.00   -1.25%
BenchmarkWaitGroupWaitWork-2          411.00     401.00   -2.43%
BenchmarkWaitGroupWaitWork-4          210.00     199.00   -5.24%
BenchmarkWaitGroupWaitWork-8          206.00     105.00  -49.03%
BenchmarkWaitGroupWaitWork-16         334.00      54.40  -83.71%

R=rsc
CC=golang-dev
https://golang.org/cl/4672050
166 lines
3.1 KiB
Go
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package sync_test
|
|
|
|
import (
|
|
"runtime"
|
|
. "sync"
|
|
"sync/atomic"
|
|
"testing"
|
|
)
|
|
|
|
// testWaitGroup checks that wg1.Wait does not return until all n
// goroutines have called wg1.Done, and that wg2 holds every goroutine
// blocked until the main goroutine has released it n times.
func testWaitGroup(t *testing.T, wg1 *WaitGroup, wg2 *WaitGroup) {
	n := 16
	wg1.Add(n)
	wg2.Add(n)
	exited := make(chan bool, n)
	for i := 0; i != n; i++ {
		// The goroutine closure captures nothing per-iteration, so no
		// loop-variable parameter is needed (the original passed an
		// unused i).
		go func() {
			wg1.Done()
			wg2.Wait() // blocked until the main goroutine calls wg2.Done n times
			exited <- true
		}()
	}
	// All goroutines have reached the wg2 barrier once this returns.
	wg1.Wait()
	for i := 0; i != n; i++ {
		// No goroutine may get past wg2.Wait before all Done calls land.
		select {
		case <-exited:
			t.Fatal("WaitGroup released group too soon")
		default:
		}
		wg2.Done()
	}
	for i := 0; i != n; i++ {
		<-exited // Will block if barrier fails to unlock someone.
	}
}
|
|
|
|
func TestWaitGroup(t *testing.T) {
|
|
wg1 := &WaitGroup{}
|
|
wg2 := &WaitGroup{}
|
|
|
|
// Run the same test a few times to ensure barrier is in a proper state.
|
|
for i := 0; i != 8; i++ {
|
|
testWaitGroup(t, wg1, wg2)
|
|
}
|
|
}
|
|
|
|
func TestWaitGroupMisuse(t *testing.T) {
|
|
defer func() {
|
|
err := recover()
|
|
if err != "sync: negative WaitGroup count" {
|
|
t.Fatalf("Unexpected panic: %#v", err)
|
|
}
|
|
}()
|
|
wg := &WaitGroup{}
|
|
wg.Add(1)
|
|
wg.Done()
|
|
wg.Done()
|
|
t.Fatal("Should panic")
|
|
}
|
|
|
|
// BenchmarkWaitGroupUncontended measures Add/Done/Wait cycles on a
// WaitGroup private to each goroutine, so no cross-goroutine
// contention occurs.
func BenchmarkWaitGroupUncontended(b *testing.B) {
	type PaddedWaitGroup struct {
		WaitGroup
		pad [128]uint8 // keep each group on its own cache line
	}
	const batch = 1000
	numProcs := runtime.GOMAXPROCS(-1)
	remaining := int32(b.N / batch)
	done := make(chan bool, numProcs)
	for p := 0; p < numProcs; p++ {
		go func() {
			var wg PaddedWaitGroup
			// Claim work in batches until b.N iterations are consumed.
			for atomic.AddInt32(&remaining, -1) >= 0 {
				runtime.Gosched()
				for j := 0; j < batch; j++ {
					wg.Add(1)
					wg.Done()
					wg.Wait()
				}
			}
			done <- true
		}()
	}
	for p := 0; p < numProcs; p++ {
		<-done
	}
}
|
|
|
|
// benchmarkWaitGroupAddDone measures concurrent Add(1)/Done pairs on a
// single shared WaitGroup, with localWork iterations of busywork
// interleaved between the two calls.
func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
	const batch = 1000
	numProcs := runtime.GOMAXPROCS(-1)
	remaining := int32(b.N / batch)
	done := make(chan bool, numProcs)
	var wg WaitGroup
	for p := 0; p < numProcs; p++ {
		go func() {
			sink := 0
			// Claim work in batches until b.N iterations are consumed.
			for atomic.AddInt32(&remaining, -1) >= 0 {
				runtime.Gosched()
				for j := 0; j < batch; j++ {
					wg.Add(1)
					for k := 0; k < localWork; k++ {
						sink *= 2
						sink /= 2
					}
					wg.Done()
				}
			}
			// Publish sink so the busywork is not dead-code eliminated.
			done <- sink == 42
		}()
	}
	for p := 0; p < numProcs; p++ {
		<-done
	}
}
|
|
|
|
func BenchmarkWaitGroupAddDone(b *testing.B) {
|
|
benchmarkWaitGroupAddDone(b, 0)
|
|
}
|
|
|
|
func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
|
|
benchmarkWaitGroupAddDone(b, 100)
|
|
}
|
|
|
|
// benchmarkWaitGroupWait measures repeated Wait calls on a shared
// WaitGroup whose counter drops to zero almost immediately, with
// localWork iterations of busywork after each Wait.
func benchmarkWaitGroupWait(b *testing.B, localWork int) {
	const batch = 1000
	numProcs := runtime.GOMAXPROCS(-1)
	remaining := int32(b.N / batch)
	done := make(chan bool, numProcs)
	var wg WaitGroup
	wg.Add(numProcs)
	// Drain the counter concurrently; after that every Wait is a no-op.
	for p := 0; p < numProcs; p++ {
		go wg.Done()
	}
	for p := 0; p < numProcs; p++ {
		go func() {
			sink := 0
			// Claim work in batches until b.N iterations are consumed.
			for atomic.AddInt32(&remaining, -1) >= 0 {
				runtime.Gosched()
				for j := 0; j < batch; j++ {
					wg.Wait()
					for k := 0; k < localWork; k++ {
						sink *= 2
						sink /= 2
					}
				}
			}
			// Publish sink so the busywork is not dead-code eliminated.
			done <- sink == 42
		}()
	}
	for p := 0; p < numProcs; p++ {
		<-done
	}
}
|
|
|
|
func BenchmarkWaitGroupWait(b *testing.B) {
|
|
benchmarkWaitGroupWait(b, 0)
|
|
}
|
|
|
|
func BenchmarkWaitGroupWaitWork(b *testing.B) {
|
|
benchmarkWaitGroupWait(b, 100)
|
|
}
|