net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows us to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts for ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
// Copyright 2013 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package net
|
|
|
|
|
|
|
|
import (
|
|
|
|
"math/rand"
|
|
|
|
"runtime"
|
|
|
|
"testing"
|
|
|
|
"time"
|
|
|
|
)
|
|
|
|
|
|
|
|
func TestMutexLock(t *testing.T) {
|
|
|
|
var mu fdMutex
|
|
|
|
|
|
|
|
if !mu.Incref() {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
if mu.Decref() {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !mu.RWLock(true) {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
if mu.RWUnlock(true) {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
|
|
|
|
if !mu.RWLock(false) {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
if mu.RWUnlock(false) {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestMutexClose(t *testing.T) {
|
|
|
|
var mu fdMutex
|
|
|
|
if !mu.IncrefAndClose() {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
|
|
|
|
if mu.Incref() {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
if mu.RWLock(true) {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
if mu.RWLock(false) {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
if mu.IncrefAndClose() {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestMutexCloseUnblock(t *testing.T) {
|
|
|
|
c := make(chan bool)
|
|
|
|
var mu fdMutex
|
|
|
|
mu.RWLock(true)
|
|
|
|
for i := 0; i < 4; i++ {
|
|
|
|
go func() {
|
|
|
|
if mu.RWLock(true) {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
c <- true
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
// Concurrent goroutines must not be able to read lock the mutex.
|
|
|
|
time.Sleep(time.Millisecond)
|
|
|
|
select {
|
|
|
|
case <-c:
|
|
|
|
t.Fatal("broken")
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
mu.IncrefAndClose() // Must unblock the readers.
|
|
|
|
for i := 0; i < 4; i++ {
|
|
|
|
select {
|
|
|
|
case <-c:
|
|
|
|
case <-time.After(10 * time.Second):
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if mu.Decref() {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
if !mu.RWUnlock(true) {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestMutexPanic(t *testing.T) {
|
|
|
|
ensurePanics := func(f func()) {
|
|
|
|
defer func() {
|
|
|
|
if recover() == nil {
|
|
|
|
t.Fatal("does not panic")
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
f()
|
|
|
|
}
|
|
|
|
|
|
|
|
var mu fdMutex
|
|
|
|
ensurePanics(func() { mu.Decref() })
|
|
|
|
ensurePanics(func() { mu.RWUnlock(true) })
|
|
|
|
ensurePanics(func() { mu.RWUnlock(false) })
|
|
|
|
|
|
|
|
ensurePanics(func() { mu.Incref(); mu.Decref(); mu.Decref() })
|
|
|
|
ensurePanics(func() { mu.RWLock(true); mu.RWUnlock(true); mu.RWUnlock(true) })
|
|
|
|
ensurePanics(func() { mu.RWLock(false); mu.RWUnlock(false); mu.RWUnlock(false) })
|
|
|
|
|
|
|
|
// ensure that it's still not broken
|
|
|
|
mu.Incref()
|
|
|
|
mu.Decref()
|
|
|
|
mu.RWLock(true)
|
|
|
|
mu.RWUnlock(true)
|
|
|
|
mu.RWLock(false)
|
|
|
|
mu.RWUnlock(false)
|
|
|
|
}
|
|
|
|
|
|
|
|
func TestMutexStress(t *testing.T) {
|
|
|
|
P := 8
|
|
|
|
N := int(1e6)
|
|
|
|
if testing.Short() {
|
|
|
|
P = 4
|
|
|
|
N = 1e4
|
|
|
|
}
|
|
|
|
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(P))
|
|
|
|
done := make(chan bool)
|
|
|
|
var mu fdMutex
|
|
|
|
var readState [2]uint64
|
|
|
|
var writeState [2]uint64
|
|
|
|
for p := 0; p < P; p++ {
|
|
|
|
go func() {
|
|
|
|
r := rand.New(rand.NewSource(rand.Int63()))
|
|
|
|
for i := 0; i < N; i++ {
|
|
|
|
switch r.Intn(3) {
|
|
|
|
case 0:
|
|
|
|
if !mu.Incref() {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
if mu.Decref() {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
case 1:
|
|
|
|
if !mu.RWLock(true) {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
// Ensure that it provides mutual exclusion for readers.
|
|
|
|
if readState[0] != readState[1] {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
readState[0]++
|
|
|
|
readState[1]++
|
|
|
|
if mu.RWUnlock(true) {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
case 2:
|
|
|
|
if !mu.RWLock(false) {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
// Ensure that it provides mutual exclusion for writers.
|
|
|
|
if writeState[0] != writeState[1] {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
writeState[0]++
|
|
|
|
writeState[1]++
|
|
|
|
if mu.RWUnlock(false) {
|
2014-03-14 22:43:02 -06:00
|
|
|
t.Error("broken")
|
|
|
|
return
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
done <- true
|
|
|
|
}()
|
|
|
|
}
|
|
|
|
for p := 0; p < P; p++ {
|
|
|
|
<-done
|
|
|
|
}
|
|
|
|
if !mu.IncrefAndClose() {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
if !mu.Decref() {
|
|
|
|
t.Fatal("broken")
|
|
|
|
}
|
|
|
|
}
|