// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
"runtime"
"sync"
"sync/atomic"
"testing"
)
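
// TestChanSendInterface exercises plain, single-case select, and multi-case
// select sends of an interface value on a buffered channel.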
func TestChanSendInterface(t *testing.T) {
type mt struct{}
m := &mt{}
c := make(chan interface{}, 1)
c <- m
select {
case c <- m:
default:
}
select {
case c <- m:
case c <- &mt{}:
default:
}
}
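
// TestPseudoRandomSend checks that when both cases of a select are ready,
// the runtime chooses between them pseudo-randomly: after n sends, both
// values must each show up more than n/10 times.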
func TestPseudoRandomSend(t *testing.T) {
n := 100
c := make(chan int)
l := make([]int, n)
var m sync.Mutex
m.Lock()
go func() {
for i := 0; i < n; i++ {
runtime.Gosched()
l[i] = <-c
}
m.Unlock()
}()
for i := 0; i < n; i++ {
select {
case c <- 0:
case c <- 1:
}
}
m.Lock() // wait
n0 := 0
n1 := 0
for _, i := range l {
n0 += (i + 1) % 2
n1 += i
if n0 > n/10 && n1 > n/10 {
return
}
}
t.Errorf("Want pseudo random, got %d zeros and %d ones", n0, n1)
}
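
// TestMultiConsumer exercises many goroutines receiving from one shared
// channel: nwork workers drain q, occasionally yielding to perturb the
// scheduling order, and forward every value to r. The test then checks
// that the count and sum of the results match what the feeder sent.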
func TestMultiConsumer(t *testing.T) {
const nwork = 23
const niter = 271828
pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}
q := make(chan int, nwork*3)
r := make(chan int, nwork*3)
// workers
var wg sync.WaitGroup
for i := 0; i < nwork; i++ {
wg.Add(1)
go func(w int) {
for v := range q {
// mess with the fifo-ish nature of range
if pn[w%len(pn)] == v {
runtime.Gosched()
}
r <- v
}
wg.Done()
}(i)
}
// feeder & closer
expect := 0
go func() {
for i := 0; i < niter; i++ {
v := pn[i%len(pn)]
expect += v
q <- v
}
close(q) // no more work
wg.Wait() // workers done
close(r) // ... so there can be no more results
}()
// consume & check
n := 0
s := 0
for v := range r {
n++
s += v
}
if n != niter || s != expect {
t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
expect, s, niter, n)
}
}
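
// BenchmarkSelectUncontended measures a two-case select where each goroutine
// bounces a token between two channels private to it, so the channels are
// never contended. As in the other benchmarks below, the b.N iterations are
// split into batches of CallsPerSched operations that goroutines claim with
// an atomic counter, keeping coordination overhead out of the measured loop.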
func BenchmarkSelectUncontended(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, procs)
for p := 0; p < procs; p++ {
go func() {
myc1 := make(chan int, 1)
myc2 := make(chan int, 1)
myc1 <- 0
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
select {
case <-myc1:
myc2 <- 0
case <-myc2:
myc1 <- 0
}
}
}
c <- true
}()
}
for p := 0; p < procs; p++ {
<-c
}
}
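
// BenchmarkSelectContended measures the same two-case select, but with both
// channels shared by all goroutines, so every operation contends on the
// channel locks.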
func BenchmarkSelectContended(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, procs)
myc1 := make(chan int, procs)
myc2 := make(chan int, procs)
for p := 0; p < procs; p++ {
myc1 <- 0
go func() {
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
select {
case <-myc1:
myc2 <- 0
case <-myc2:
myc1 <- 0
}
}
}
c <- true
}()
}
for p := 0; p < procs; p++ {
<-c
}
}
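
// BenchmarkSelectNonblock measures non-blocking (default-case) selects on
// unbuffered and buffered channels; the communication cases are almost
// never ready, so nearly every select takes the default branch.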
func BenchmarkSelectNonblock(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, procs)
for p := 0; p < procs; p++ {
go func() {
myc1 := make(chan int)
myc2 := make(chan int)
myc3 := make(chan int, 1)
myc4 := make(chan int, 1)
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
select {
case <-myc1:
default:
}
select {
case myc2 <- 0:
default:
}
select {
case <-myc3:
default:
}
select {
case myc4 <- 0:
default:
}
}
}
c <- true
}()
}
for p := 0; p < procs; p++ {
<-c
}
}
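
// BenchmarkChanUncontended measures buffered sends and receives on a channel
// private to each goroutine, so there is no contention.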
func BenchmarkChanUncontended(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, procs)
for p := 0; p < procs; p++ {
go func() {
myc := make(chan int, CallsPerSched)
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
myc <- 0
}
for g := 0; g < CallsPerSched; g++ {
<-myc
}
}
c <- true
}()
}
for p := 0; p < procs; p++ {
<-c
}
}
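
// BenchmarkChanContended is the same workload, but with a single buffered
// channel shared by all goroutines.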
func BenchmarkChanContended(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, procs)
myc := make(chan int, procs*CallsPerSched)
for p := 0; p < procs; p++ {
go func() {
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
myc <- 0
}
for g := 0; g < CallsPerSched; g++ {
<-myc
}
}
c <- true
}()
}
for p := 0; p < procs; p++ {
<-c
}
}
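
// BenchmarkChanSync measures synchronous handoff on an unbuffered channel
// between two goroutines; batches with even and odd indices take opposite
// send/receive order so the two goroutines can pair up.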
func BenchmarkChanSync(b *testing.B) {
const CallsPerSched = 1000
procs := 2
N := int32(b.N / CallsPerSched / procs * procs)
c := make(chan bool, procs)
myc := make(chan int)
for p := 0; p < procs; p++ {
go func() {
for {
i := atomic.AddInt32(&N, -1)
if i < 0 {
break
}
for g := 0; g < CallsPerSched; g++ {
if i%2 == 0 {
<-myc
myc <- 0
} else {
myc <- 0
<-myc
}
}
}
c <- true
}()
}
for p := 0; p < procs; p++ {
<-c
}
}
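
// benchmarkChanProdCons runs producer/consumer goroutine pairs over a channel
// with the given buffer size; localWork adds busywork per message to vary the
// ratio of communication to computation. Each producer sends a final 0 as a
// sentinel telling one consumer to stop.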
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, 2*procs)
myc := make(chan int, chanSize)
for p := 0; p < procs; p++ {
go func() {
foo := 0
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
for i := 0; i < localWork; i++ {
foo *= 2
foo /= 2
}
myc <- 1
}
}
myc <- 0
c <- foo == 42
}()
go func() {
foo := 0
for {
v := <-myc
if v == 0 {
break
}
for i := 0; i < localWork; i++ {
foo *= 2
foo /= 2
}
}
c <- foo == 42
}()
}
for p := 0; p < procs; p++ {
<-c
<-c
}
}

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}
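
// BenchmarkChanCreation measures allocating a fresh buffered channel plus one
// send and receive on it.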
func BenchmarkChanCreation(b *testing.B) {
const CallsPerSched = 1000
procs := runtime.GOMAXPROCS(-1)
N := int32(b.N / CallsPerSched)
c := make(chan bool, procs)
for p := 0; p < procs; p++ {
go func() {
for atomic.AddInt32(&N, -1) >= 0 {
for g := 0; g < CallsPerSched; g++ {
myc := make(chan int, 1)
myc <- 0
<-myc
}
}
c <- true
}()
}
for p := 0; p < procs; p++ {
<-c
}
}
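
// BenchmarkChanSem measures using a buffered channel of empty structs as a
// semaphore: acquire by sending, release by receiving.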
func BenchmarkChanSem(b *testing.B) {
type Empty struct{}
c := make(chan Empty, 1)
for i := 0; i < b.N; i++ {
c <- Empty{}
<-c
}
}