// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime_test

import (
	"runtime"
	"sync"
	"sync/atomic"
	"testing"
	"time"
)

func TestChan(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
	N := 200
	if testing.Short() {
		N = 20
	}
	for chanCap := 0; chanCap < N; chanCap++ {
		{
			// Ensure that receive from empty chan blocks.
			c := make(chan int, chanCap)
			recv1 := false
			go func() {
				_ = <-c
				recv1 = true
			}()
			recv2 := false
			go func() {
				_, _ = <-c
				recv2 = true
			}()
			time.Sleep(time.Millisecond)
			if recv1 || recv2 {
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			}
			// Ensure that non-blocking receive does not block.
			select {
			case _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			select {
			case _, _ = <-c:
				t.Fatalf("chan[%d]: receive from empty chan", chanCap)
			default:
			}
			c <- 0
			c <- 0
		}

		{
			// Ensure that send to full chan blocks.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			sent := uint32(0)
			go func() {
				c <- 0
				atomic.StoreUint32(&sent, 1)
			}()
			time.Sleep(time.Millisecond)
			if atomic.LoadUint32(&sent) != 0 {
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			}
			// Ensure that non-blocking send does not block.
			select {
			case c <- 0:
				t.Fatalf("chan[%d]: send to full chan", chanCap)
			default:
			}
			<-c
		}

		{
			// Ensure that we receive 0 from closed chan.
			c := make(chan int, chanCap)
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			close(c)
			for i := 0; i < chanCap; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}
			if v := <-c; v != 0 {
				t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, 0)
			}
			if v, ok := <-c; v != 0 || ok {
				t.Fatalf("chan[%d]: received %v/%v, expected %v/%v", chanCap, v, ok, 0, false)
			}
		}

		{
			// Ensure that close unblocks receive.
			c := make(chan int, chanCap)
			done := make(chan bool)
			go func() {
				v, ok := <-c
				done <- v == 0 && ok == false
			}()
			time.Sleep(time.Millisecond)
			close(c)
			if !<-done {
				t.Fatalf("chan[%d]: received non zero from closed chan", chanCap)
			}
		}

		{
			// Send 100 integers,
			// ensure that we receive them non-corrupted in FIFO order.
			c := make(chan int, chanCap)
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v := <-c
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Same, but using recv2.
			go func() {
				for i := 0; i < 100; i++ {
					c <- i
				}
			}()
			for i := 0; i < 100; i++ {
				v, ok := <-c
				if !ok {
					t.Fatalf("chan[%d]: receive failed, expected %v", chanCap, i)
				}
				if v != i {
					t.Fatalf("chan[%d]: received %v, expected %v", chanCap, v, i)
				}
			}

			// Send 1000 integers in 4 goroutines,
			// ensure that we receive what we send.
			const P = 4
			const L = 1000
			for p := 0; p < P; p++ {
				go func() {
					for i := 0; i < L; i++ {
						c <- i
					}
				}()
			}
			done := make(chan map[int]int)
			for p := 0; p < P; p++ {
				go func() {
					recv := make(map[int]int)
					for i := 0; i < L; i++ {
						v := <-c
						recv[v] = recv[v] + 1
					}
					done <- recv
				}()
			}
			recv := make(map[int]int)
			for p := 0; p < P; p++ {
				for k, v := range <-done {
					recv[k] = recv[k] + v
				}
			}
			if len(recv) != L {
				t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, len(recv), L)
			}
			for _, v := range recv {
				if v != P {
					t.Fatalf("chan[%d]: received %v values, expected %v", chanCap, v, P)
				}
			}
		}

		{
			// Test len/cap.
			c := make(chan int, chanCap)
			if len(c) != 0 || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, 0, chanCap, len(c), cap(c))
			}
			for i := 0; i < chanCap; i++ {
				c <- i
			}
			if len(c) != chanCap || cap(c) != chanCap {
				t.Fatalf("chan[%d]: bad len/cap, expect %v/%v, got %v/%v", chanCap, chanCap, chanCap, len(c), cap(c))
			}
		}

	}
}

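// TestNonblockRecvRace races a non-blocking (select/default) receive
// against a close of a buffered channel that already holds a value;
// the non-blocking receive must always find the channel ready.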
func TestNonblockRecvRace(t *testing.T) {
	n := 10000
	if testing.Short() {
		n = 100
	}
	for i := 0; i < n; i++ {
		c := make(chan int, 1)
		c <- 1
		go func() {
			select {
			case <-c:
			default:
				// Error, not Fatal: Fatal must not be called from a non-test goroutine.
				t.Error("chan is not ready")
			}
		}()
		close(c)
		<-c
	}
}

func TestSelfSelect(t *testing.T) {
	// Ensure that send/recv on the same chan in select
	// does not crash nor deadlock.
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	for _, chanCap := range []int{0, 10} {
		var wg sync.WaitGroup
		wg.Add(2)
		c := make(chan int, chanCap)
		for p := 0; p < 2; p++ {
			p := p
			go func() {
				defer wg.Done()
				for i := 0; i < 1000; i++ {
					if p == 0 || i%2 == 0 {
						select {
						case c <- p:
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
							}
						}
					} else {
						select {
						case v := <-c:
							if chanCap == 0 && v == p {
								t.Errorf("self receive")
							}
						case c <- p:
						}
					}
				}
			}()
		}
		wg.Wait()
	}
}

func TestSelectStress(t *testing.T) {
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(10))
	var c [4]chan int
	c[0] = make(chan int)
	c[1] = make(chan int)
	c[2] = make(chan int, 2)
	c[3] = make(chan int, 3)
	N := int(1e5)
	if testing.Short() {
		N /= 10
	}
	// There are 4 goroutines that send N values on each of the chans,
	// + 4 goroutines that receive N values on each of the chans,
	// + 1 goroutine that sends N values on each of the chans in a single select,
	// + 1 goroutine that receives N values on each of the chans in a single select.
	// All these sends, receives and selects interact chaotically at runtime,
	// but we are careful that this whole construct does not deadlock.
	var wg sync.WaitGroup
	wg.Add(10)
	for k := 0; k < 4; k++ {
		k := k
		go func() {
			for i := 0; i < N; i++ {
				c[k] <- 0
			}
			wg.Done()
		}()
		go func() {
			for i := 0; i < N; i++ {
				<-c[k]
			}
			wg.Done()
		}()
	}
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case c1[3] <- 0:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			case c1[2] <- 0:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case c1[0] <- 0:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case c1[1] <- 0:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			}
		}
		wg.Done()
	}()
	go func() {
		var n [4]int
		c1 := c
		for i := 0; i < 4*N; i++ {
			select {
			case <-c1[0]:
				n[0]++
				if n[0] == N {
					c1[0] = nil
				}
			case <-c1[1]:
				n[1]++
				if n[1] == N {
					c1[1] = nil
				}
			case <-c1[2]:
				n[2]++
				if n[2] == N {
					c1[2] = nil
				}
			case <-c1[3]:
				n[3]++
				if n[3] == N {
					c1[3] = nil
				}
			}
		}
		wg.Done()
	}()
	wg.Wait()
}

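// TestChanSendInterface checks that pointer values can be sent on a
// channel of interface type, both directly and from select send cases.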
func TestChanSendInterface(t *testing.T) {
	type mt struct{}
	m := &mt{}
	c := make(chan interface{}, 1)
	c <- m
	select {
	case c <- m:
	default:
	}
	select {
	case c <- m:
	case c <- &mt{}:
	default:
	}
}

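// TestPseudoRandomSend checks that when several send cases of a select
// are ready, the choice among them is pseudorandom rather than fixed.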
func TestPseudoRandomSend(t *testing.T) {
	n := 100
	for _, chanCap := range []int{0, n} {
		c := make(chan int, chanCap)
		l := make([]int, n)
		var m sync.Mutex
		m.Lock()
		go func() {
			for i := 0; i < n; i++ {
				runtime.Gosched()
				l[i] = <-c
			}
			m.Unlock()
		}()
		for i := 0; i < n; i++ {
			select {
			case c <- 1:
			case c <- 0:
			}
		}
		m.Lock() // wait
		n0 := 0
		n1 := 0
		for _, i := range l {
			n0 += (i + 1) % 2
			n1 += i
		}
		if n0 <= n/10 || n1 <= n/10 {
			t.Errorf("Want pseudorandom, got %d zeros and %d ones (chan cap %d)", n0, n1, chanCap)
		}
	}
}

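// TestMultiConsumer runs many workers draining a single work channel and
// checks that every queued value is forwarded exactly once (by count and sum).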
func TestMultiConsumer(t *testing.T) {
	const nwork = 23
	const niter = 271828

	pn := []int{2, 3, 7, 11, 13, 17, 19, 23, 27, 31}

	q := make(chan int, nwork*3)
	r := make(chan int, nwork*3)

	// workers
	var wg sync.WaitGroup
	for i := 0; i < nwork; i++ {
		wg.Add(1)
		go func(w int) {
			for v := range q {
				// mess with the fifo-ish nature of range
				if pn[w%len(pn)] == v {
					runtime.Gosched()
				}
				r <- v
			}
			wg.Done()
		}(i)
	}

	// feeder & closer
	expect := 0
	go func() {
		for i := 0; i < niter; i++ {
			v := pn[i%len(pn)]
			expect += v
			q <- v
		}
		close(q)  // no more work
		wg.Wait() // workers done
		close(r)  // ... so there can be no more results
	}()

	// consume & check
	n := 0
	s := 0
	for v := range r {
		n++
		s += v
	}
	if n != niter || s != expect {
		t.Errorf("Expected sum %d (got %d) from %d iter (saw %d)",
			expect, s, niter, n)
	}
}

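// stackGrowthRecursive, used below, is assumed to be defined elsewhere in
// this test package (it is not part of this file).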
func TestShrinkStackDuringBlockedSend(t *testing.T) {
	// make sure that channel operations still work when we are
	// blocked on a channel send and we shrink the stack.
	// NOTE: this test probably won't fail unless stack.c:StackDebug
	// is set to >= 1.
	const n = 10
	c := make(chan int)
	done := make(chan struct{})

	go func() {
		for i := 0; i < n; i++ {
			c <- i
			// use lots of stack, briefly.
			stackGrowthRecursive(20)
		}
		done <- struct{}{}
	}()

	for i := 0; i < n; i++ {
		x := <-c
		if x != i {
			t.Errorf("bad channel read: want %d, got %d", i, x)
		}
		// Waste some time so sender can finish using lots of stack
		// and block in channel send.
		time.Sleep(1 * time.Millisecond)
		// trigger GC which will shrink the stack of the sender.
		runtime.GC()
	}
	<-done
}

func TestSelectDuplicateChannel(t *testing.T) {
	// This test makes sure we can queue a G on
	// the same channel multiple times.
	c := make(chan int)
	d := make(chan int)
	e := make(chan int)

	// goroutine A
	go func() {
		select {
		case <-c:
		case <-c:
		case <-d:
		}
		e <- 9
	}()
	time.Sleep(time.Millisecond) // make sure goroutine A gets queued first on c

	// goroutine B
	go func() {
		<-c
	}()
	time.Sleep(time.Millisecond) // make sure goroutine B gets queued on c before continuing

	d <- 7 // wake up A, it dequeues itself from c. This operation used to corrupt c.recvq.
	<-e    // A tells us it's done
	c <- 8 // wake up B. This operation used to fail because c.recvq was corrupted (it tries to wake up an already running G instead of B)
}

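// BenchmarkChanNonblocking measures a non-blocking (select/default)
// receive on a channel that is never ready.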
func BenchmarkChanNonblocking(b *testing.B) {
	myc := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc:
			default:
			}
		}
	})
}

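// BenchmarkSelectUncontended measures a two-case select where each
// goroutine ping-pongs a token between its own pair of buffered channels,
// so there is no cross-goroutine contention.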
func BenchmarkSelectUncontended(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		myc1 := make(chan int, 1)
		myc2 := make(chan int, 1)
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

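// BenchmarkSelectSyncContended measures select over unbuffered channels
// shared by all goroutines, so every operation is contended.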
func BenchmarkSelectSyncContended(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int)
	done := make(chan int)
	b.RunParallel(func(pb *testing.PB) {
		go func() {
			for {
				select {
				case myc1 <- 0:
				case myc2 <- 0:
				case myc3 <- 0:
				case <-done:
					return
				}
			}
		}()
		for pb.Next() {
			select {
			case <-myc1:
			case <-myc2:
			case <-myc3:
			}
		}
	})
	close(done)
}

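// BenchmarkSelectAsyncContended is the buffered counterpart of
// BenchmarkSelectSyncContended: the channels are shared by all
// goroutines, but have capacity GOMAXPROCS.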
func BenchmarkSelectAsyncContended(b *testing.B) {
	procs := runtime.GOMAXPROCS(0)
	myc1 := make(chan int, procs)
	myc2 := make(chan int, procs)
	b.RunParallel(func(pb *testing.PB) {
		myc1 <- 0
		for pb.Next() {
			select {
			case <-myc1:
				myc2 <- 0
			case <-myc2:
				myc1 <- 0
			}
		}
	})
}

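// BenchmarkSelectNonblock measures non-blocking (select/default) sends
// and receives on both unbuffered and buffered channels.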
func BenchmarkSelectNonblock(b *testing.B) {
	myc1 := make(chan int)
	myc2 := make(chan int)
	myc3 := make(chan int, 1)
	myc4 := make(chan int, 1)
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			select {
			case <-myc1:
			default:
			}
			select {
			case myc2 <- 0:
			default:
			}
			select {
			case <-myc3:
			default:
			}
			select {
			case myc4 <- 0:
			default:
			}
		}
	})
}

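// BenchmarkChanUncontended fills and drains a per-goroutine buffered
// channel, so sends and receives never contend.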
func BenchmarkChanUncontended(b *testing.B) {
	const C = 100
	b.RunParallel(func(pb *testing.PB) {
		myc := make(chan int, C)
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

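// BenchmarkChanContended fills and drains a single buffered channel
// shared by all goroutines.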
func BenchmarkChanContended(b *testing.B) {
	const C = 100
	myc := make(chan int, C*runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			for i := 0; i < C; i++ {
				myc <- 0
			}
			for i := 0; i < C; i++ {
				<-myc
			}
		}
	})
}

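// BenchmarkChanSync measures synchronous ping-pong over an unbuffered
// channel between two goroutines.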
func BenchmarkChanSync(b *testing.B) {
	const CallsPerSched = 1000
	procs := 2
	N := int32(b.N / CallsPerSched / procs * procs)
	c := make(chan bool, procs)
	myc := make(chan int)
	for p := 0; p < procs; p++ {
		go func() {
			for {
				i := atomic.AddInt32(&N, -1)
				if i < 0 {
					break
				}
				for g := 0; g < CallsPerSched; g++ {
					if i%2 == 0 {
						<-myc
						myc <- 0
					} else {
						myc <- 0
						<-myc
					}
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

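// benchmarkChanProdCons runs one producer and one consumer goroutine per
// proc over a shared channel with the given buffer size; localWork models
// per-item CPU work.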
func benchmarkChanProdCons(b *testing.B, chanSize, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, chanSize)
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					myc <- 1
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			foo := 0
			for {
				v := <-myc
				if v == 0 {
					break
				}
				for i := 0; i < localWork; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

func BenchmarkChanProdCons0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 0)
}

func BenchmarkChanProdCons10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 0)
}

func BenchmarkChanProdCons100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 0)
}

func BenchmarkChanProdConsWork0(b *testing.B) {
	benchmarkChanProdCons(b, 0, 100)
}

func BenchmarkChanProdConsWork10(b *testing.B) {
	benchmarkChanProdCons(b, 10, 100)
}

func BenchmarkChanProdConsWork100(b *testing.B) {
	benchmarkChanProdCons(b, 100, 100)
}

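// BenchmarkSelectProdCons is the producer/consumer benchmark with each
// send and receive going through a select that also has timer and close
// cases which are not expected to fire.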
func BenchmarkSelectProdCons(b *testing.B) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, 2*procs)
	myc := make(chan int, 128)
	myclose := make(chan bool)
	for p := 0; p < procs; p++ {
		go func() {
			// Producer: sends to myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
			for atomic.AddInt32(&N, -1) >= 0 {
				for g := 0; g < CallsPerSched; g++ {
					// Model some local work.
					for i := 0; i < 100; i++ {
						foo *= 2
						foo /= 2
					}
					select {
					case myc <- 1:
					case <-mytimer:
					case <-myclose:
					}
				}
			}
			myc <- 0
			c <- foo == 42
		}()
		go func() {
			// Consumer: receives from myc.
			foo := 0
			// Intended to not fire during benchmarking.
			mytimer := time.After(time.Hour)
		loop:
			for {
				select {
				case v := <-myc:
					if v == 0 {
						break loop
					}
				case <-mytimer:
				case <-myclose:
				}
				// Model some local work.
				for i := 0; i < 100; i++ {
					foo *= 2
					foo /= 2
				}
			}
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
		<-c
	}
}

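// BenchmarkChanCreation measures allocating a buffered channel plus one
// send and one receive on it.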
func BenchmarkChanCreation(b *testing.B) {
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc := make(chan int, 1)
			myc <- 0
			<-myc
		}
	})
}

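// BenchmarkChanSem uses a buffered channel as a counting semaphore:
// each iteration acquires (send) and releases (receive) one slot.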
func BenchmarkChanSem(b *testing.B) {
	type Empty struct{}
	myc := make(chan Empty, runtime.GOMAXPROCS(0))
	b.RunParallel(func(pb *testing.PB) {
		for pb.Next() {
			myc <- Empty{}
			<-myc
		}
	})
}

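// BenchmarkChanPopular queues many goroutines selecting on one popular
// shared channel (plus a private one each) and repeatedly wakes them via
// the private channels.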
func BenchmarkChanPopular(b *testing.B) {
	const n = 1000
	c := make(chan bool)
	var a []chan bool
	for j := 0; j < n; j++ {
		d := make(chan bool)
		a = append(a, d)
		go func() {
			for i := 0; i < b.N; i++ {
				select {
				case <-c:
				case <-d:
				}
			}
		}()
	}
	for i := 0; i < b.N; i++ {
		for _, d := range a {
			d <- true
		}
	}
}