2009-03-06 18:51:31 -07:00
|
|
|
// Copyright 2009 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package net
|
|
|
|
|
|
|
|
import (
|
2012-01-18 17:24:06 -07:00
|
|
|
"fmt"
|
2012-11-23 23:15:26 -07:00
|
|
|
"io"
|
|
|
|
"io/ioutil"
|
2011-10-31 09:47:44 -06:00
|
|
|
"runtime"
|
2009-12-15 16:35:38 -07:00
|
|
|
"testing"
|
|
|
|
"time"
|
2009-03-06 18:51:31 -07:00
|
|
|
)
|
|
|
|
|
2012-11-23 23:15:26 -07:00
|
|
|
func isTimeout(err error) bool {
|
|
|
|
e, ok := err.(Error)
|
|
|
|
return ok && e.Timeout()
|
|
|
|
}
|
|
|
|
|
|
|
|
// copyRes carries the result of a data-transfer goroutine (typically an
// io.Copy or a single Read/Write) back to the test that spawned it.
type copyRes struct {
	// n is the number of bytes transferred.
	n int64
	// err is the error that terminated the transfer, if any.
	err error
	// d is how long the transfer took.
	d time.Duration
}
|
|
|
|
|
2012-11-25 02:27:32 -07:00
|
|
|
// TestAcceptTimeout verifies listener deadlines: Accept with an already
// expired deadline fails immediately with a timeout, a short future
// deadline makes Accept time out once it passes, and clearing the
// deadline makes Accept block until the listener is closed.
func TestAcceptTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("skipping test on %q", runtime.GOOS)
	}

	ln := newLocalListener(t).(*TCPListener)
	defer ln.Close()
	// A deadline in the past must make Accept fail with a timeout,
	// repeatedly (the error persists until the deadline changes).
	ln.SetDeadline(time.Now().Add(-1 * time.Second))
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	// A short future deadline must also end in a timeout: no client
	// ever connects.
	ln.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	if _, err := ln.Accept(); !isTimeout(err) {
		t.Fatalf("Accept: expected err %v, got %v", errTimeout, err)
	}
	// With the deadline cleared, Accept should block indefinitely...
	ln.SetDeadline(noDeadline)
	errc := make(chan error)
	go func() {
		_, err := ln.Accept()
		errc <- err
	}()
	time.Sleep(100 * time.Millisecond)
	select {
	case err := <-errc:
		t.Fatalf("Expected Accept() to not return, but it returned with %v\n", err)
	default:
	}
	// ...until the listener is closed, which must unblock it with
	// errClosing (possibly wrapped in an *OpError).
	ln.Close()
	switch nerr := <-errc; err := nerr.(type) {
	case *OpError:
		if err.Err != errClosing {
			t.Fatalf("Accept: expected err %v, got %v", errClosing, err)
		}
	default:
		if err != errClosing {
			t.Fatalf("Accept: expected err %v, got %v", errClosing, err)
		}
	}
}
|
|
|
|
|
|
|
|
// TestReadTimeout verifies read deadlines on a TCP connection: a past
// deadline fails reads immediately with a timeout, a short future
// deadline times out once it passes, and a cleared read deadline makes
// Read block until the connection is closed.
func TestReadTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("skipping test on %q", runtime.GOOS)
	}

	ln := newLocalListener(t)
	defer ln.Close()
	c, err := DialTCP("tcp", nil, ln.Addr().(*TCPAddr))
	if err != nil {
		t.Fatalf("Connect: %v", err)
	}
	defer c.Close()
	// The far-future overall deadline is overridden by the more
	// specific read deadline in the past.
	c.SetDeadline(time.Now().Add(time.Hour))
	c.SetReadDeadline(time.Now().Add(-1 * time.Second))
	buf := make([]byte, 1)
	// A past deadline fails repeatedly until it is changed.
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	// A short future deadline also times out: the peer never writes.
	c.SetDeadline(time.Now().Add(100 * time.Millisecond))
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	if _, err = c.Read(buf); !isTimeout(err) {
		t.Fatalf("Read: expected err %v, got %v", errTimeout, err)
	}
	// Clear the read deadline; the write deadline set below must not
	// affect reads, so Read should now block...
	c.SetReadDeadline(noDeadline)
	c.SetWriteDeadline(time.Now().Add(-1 * time.Second))
	errc := make(chan error)
	go func() {
		_, err := c.Read(buf)
		errc <- err
	}()
	time.Sleep(100 * time.Millisecond)
	select {
	case err := <-errc:
		t.Fatalf("Expected Read() to not return, but it returned with %v\n", err)
	default:
	}
	// ...until the connection is closed, which must unblock it with
	// errClosing (possibly wrapped in an *OpError).
	c.Close()
	switch nerr := <-errc; err := nerr.(type) {
	case *OpError:
		if err.Err != errClosing {
			t.Fatalf("Read: expected err %v, got %v", errClosing, err)
		}
	default:
		if err != errClosing {
			t.Fatalf("Read: expected err %v, got %v", errClosing, err)
		}
	}
}
|
|
|
|
|
|
|
|
func TestWriteTimeout(t *testing.T) {
|
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
2013-01-23 23:32:10 -07:00
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
2012-11-25 02:27:32 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
ln := newLocalListener(t)
|
|
|
|
defer ln.Close()
|
|
|
|
c, err := DialTCP("tcp", nil, ln.Addr().(*TCPAddr))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Connect: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
c.SetDeadline(time.Now().Add(time.Hour))
|
|
|
|
c.SetWriteDeadline(time.Now().Add(-1 * time.Second))
|
|
|
|
buf := make([]byte, 4096)
|
|
|
|
writeUntilTimeout := func() {
|
|
|
|
for {
|
|
|
|
_, err := c.Write(buf)
|
|
|
|
if err != nil {
|
|
|
|
if isTimeout(err) {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
t.Fatalf("Write: expected err %v, got %v", errTimeout, err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
writeUntilTimeout()
|
|
|
|
c.SetDeadline(time.Now().Add(10 * time.Millisecond))
|
|
|
|
writeUntilTimeout()
|
|
|
|
writeUntilTimeout()
|
|
|
|
c.SetWriteDeadline(noDeadline)
|
|
|
|
c.SetReadDeadline(time.Now().Add(-1 * time.Second))
|
|
|
|
errc := make(chan error)
|
|
|
|
go func() {
|
|
|
|
for {
|
|
|
|
_, err := c.Write(buf)
|
|
|
|
if err != nil {
|
|
|
|
errc <- err
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
time.Sleep(100 * time.Millisecond)
|
|
|
|
select {
|
|
|
|
case err := <-errc:
|
|
|
|
t.Fatalf("Expected Write() to not return, but it returned with %v\n", err)
|
|
|
|
default:
|
|
|
|
}
|
|
|
|
c.Close()
|
2012-12-21 22:56:02 -07:00
|
|
|
switch nerr := <-errc; err := nerr.(type) {
|
|
|
|
case *OpError:
|
|
|
|
if err.Err != errClosing {
|
|
|
|
t.Fatalf("Write: expected err %v, got %v", errClosing, err)
|
|
|
|
}
|
|
|
|
default:
|
|
|
|
if err != errClosing {
|
|
|
|
t.Fatalf("Write: expected err %v, got %v", errClosing, err)
|
|
|
|
}
|
2012-11-25 02:27:32 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-03-05 17:43:45 -07:00
|
|
|
// testTimeout dials net/addr, sets a 100ms read deadline, and checks
// that a Read (or ReadFrom, when readFrom is true) against a server
// that never replies returns a timeout error in roughly that time.
func testTimeout(t *testing.T, net, addr string, readFrom bool) {
	c, err := Dial(net, addr)
	if err != nil {
		t.Errorf("Dial(%q, %q) failed: %v", net, addr, err)
		return
	}
	defer c.Close()
	// what names the operation under test for error messages.
	what := "Read"
	if readFrom {
		what = "ReadFrom"
	}

	// Buffered so the reader goroutine never blocks on send even if
	// the main goroutine has already given up waiting.
	errc := make(chan error, 1)
	go func() {
		t0 := time.Now()
		c.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
		var b [100]byte
		var n int
		var err error
		if readFrom {
			n, _, err = c.(PacketConn).ReadFrom(b[0:])
		} else {
			n, err = c.Read(b[0:])
		}
		t1 := time.Now()
		// The read must deliver no data and fail with a timeout.
		if n != 0 || err == nil || !err.(Error).Timeout() {
			errc <- fmt.Errorf("%s(%q, %q) did not return 0, timeout: %v, %v", what, net, addr, n, err)
			return
		}
		// The timeout should fire near the 100ms mark; the upper bound
		// is skipped in short mode to tolerate loaded builders.
		if dt := t1.Sub(t0); dt < 50*time.Millisecond || !testing.Short() && dt > 250*time.Millisecond {
			errc <- fmt.Errorf("%s(%q, %q) took %s, expected 0.1s", what, net, addr, dt)
			return
		}
		errc <- nil
	}()
	select {
	case err := <-errc:
		if err != nil {
			t.Error(err)
		}
	case <-time.After(1 * time.Second):
		t.Errorf("%s(%q, %q) took over 1 second, expected 0.1s", what, net, addr)
	}
}
|
|
|
|
|
2009-11-17 09:39:17 -07:00
|
|
|
func TestTimeoutUDP(t *testing.T) {
|
2012-03-05 23:41:17 -07:00
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
2013-01-23 23:32:10 -07:00
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
2011-10-31 09:47:44 -06:00
|
|
|
}
|
2012-03-05 17:43:45 -07:00
|
|
|
|
|
|
|
// set up a listener that won't talk back
|
|
|
|
listening := make(chan string)
|
|
|
|
done := make(chan int)
|
|
|
|
go runDatagramPacketConnServer(t, "udp", "127.0.0.1:0", listening, done)
|
|
|
|
addr := <-listening
|
|
|
|
|
|
|
|
testTimeout(t, "udp", addr, false)
|
|
|
|
testTimeout(t, "udp", addr, true)
|
|
|
|
<-done
|
2009-11-17 09:39:17 -07:00
|
|
|
}
|
2009-03-06 18:51:31 -07:00
|
|
|
|
|
|
|
func TestTimeoutTCP(t *testing.T) {
|
2012-03-05 23:41:17 -07:00
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
2013-01-23 23:32:10 -07:00
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
2011-10-31 09:47:44 -06:00
|
|
|
}
|
2012-03-05 17:43:45 -07:00
|
|
|
|
2011-01-26 10:38:06 -07:00
|
|
|
// set up a listener that won't talk back
|
|
|
|
listening := make(chan string)
|
|
|
|
done := make(chan int)
|
2012-03-05 17:43:45 -07:00
|
|
|
go runStreamConnServer(t, "tcp", "127.0.0.1:0", listening, done)
|
2011-01-26 10:38:06 -07:00
|
|
|
addr := <-listening
|
|
|
|
|
|
|
|
testTimeout(t, "tcp", addr, false)
|
|
|
|
<-done
|
2009-03-06 18:51:31 -07:00
|
|
|
}
|
2012-01-24 15:06:12 -07:00
|
|
|
|
|
|
|
func TestDeadlineReset(t *testing.T) {
|
2012-03-05 23:41:17 -07:00
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
2013-01-23 23:32:10 -07:00
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
2012-01-24 15:06:12 -07:00
|
|
|
}
|
|
|
|
ln, err := Listen("tcp", "127.0.0.1:0")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
defer ln.Close()
|
|
|
|
tl := ln.(*TCPListener)
|
|
|
|
tl.SetDeadline(time.Now().Add(1 * time.Minute))
|
2012-11-25 02:27:32 -07:00
|
|
|
tl.SetDeadline(noDeadline) // reset it
|
2012-01-24 15:06:12 -07:00
|
|
|
errc := make(chan error, 1)
|
|
|
|
go func() {
|
|
|
|
_, err := ln.Accept()
|
|
|
|
errc <- err
|
|
|
|
}()
|
|
|
|
select {
|
|
|
|
case <-time.After(50 * time.Millisecond):
|
|
|
|
// Pass.
|
|
|
|
case err := <-errc:
|
|
|
|
// Accept should never return; we never
|
|
|
|
// connected to it.
|
|
|
|
t.Errorf("unexpected return from Accept; err=%v", err)
|
|
|
|
}
|
|
|
|
}
|
2012-10-30 16:58:05 -06:00
|
|
|
|
|
|
|
func TestTimeoutAccept(t *testing.T) {
|
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
2013-01-23 23:32:10 -07:00
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
2012-10-30 16:58:05 -06:00
|
|
|
}
|
|
|
|
ln, err := Listen("tcp", "127.0.0.1:0")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
defer ln.Close()
|
|
|
|
tl := ln.(*TCPListener)
|
|
|
|
tl.SetDeadline(time.Now().Add(100 * time.Millisecond))
|
|
|
|
errc := make(chan error, 1)
|
|
|
|
go func() {
|
|
|
|
_, err := ln.Accept()
|
|
|
|
errc <- err
|
|
|
|
}()
|
|
|
|
select {
|
|
|
|
case <-time.After(1 * time.Second):
|
|
|
|
// Accept shouldn't block indefinitely
|
|
|
|
t.Errorf("Accept didn't return in an expected time")
|
|
|
|
case <-errc:
|
|
|
|
// Pass.
|
|
|
|
}
|
|
|
|
}
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
|
|
|
|
func TestReadWriteDeadline(t *testing.T) {
|
2012-11-03 19:41:49 -06:00
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
2013-01-23 23:32:10 -07:00
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
2012-11-03 19:41:49 -06:00
|
|
|
}
|
|
|
|
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
const (
|
2012-11-03 19:41:49 -06:00
|
|
|
readTimeout = 50 * time.Millisecond
|
|
|
|
writeTimeout = 250 * time.Millisecond
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
)
|
|
|
|
checkTimeout := func(command string, start time.Time, should time.Duration) {
|
|
|
|
is := time.Now().Sub(start)
|
2012-11-04 03:07:59 -07:00
|
|
|
d := is - should
|
2012-11-03 19:41:49 -06:00
|
|
|
if d < -30*time.Millisecond || !testing.Short() && 150*time.Millisecond < d {
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
t.Errorf("%s timeout test failed: is=%v should=%v\n", command, is, should)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ln, err := Listen("tcp", "127.0.0.1:0")
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("ListenTCP on :0: %v", err)
|
|
|
|
}
|
2012-12-04 21:13:03 -07:00
|
|
|
defer ln.Close()
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
|
|
|
|
lnquit := make(chan bool)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
c, err := ln.Accept()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Accept: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
lnquit <- true
|
|
|
|
}()
|
|
|
|
|
|
|
|
c, err := Dial("tcp", ln.Addr().String())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Dial: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
|
|
|
|
start := time.Now()
|
|
|
|
err = c.SetReadDeadline(start.Add(readTimeout))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("SetReadDeadline: %v", err)
|
|
|
|
}
|
|
|
|
err = c.SetWriteDeadline(start.Add(writeTimeout))
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("SetWriteDeadline: %v", err)
|
|
|
|
}
|
|
|
|
|
|
|
|
quit := make(chan bool)
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
var buf [10]byte
|
2012-11-01 13:52:30 -06:00
|
|
|
_, err := c.Read(buf[:])
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
if err == nil {
|
|
|
|
t.Errorf("Read should not succeed")
|
|
|
|
}
|
|
|
|
checkTimeout("Read", start, readTimeout)
|
|
|
|
quit <- true
|
|
|
|
}()
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
var buf [10000]byte
|
|
|
|
for {
|
2012-11-01 13:52:30 -06:00
|
|
|
_, err := c.Write(buf[:])
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
if err != nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
checkTimeout("Write", start, writeTimeout)
|
|
|
|
quit <- true
|
|
|
|
}()
|
|
|
|
|
|
|
|
<-quit
|
|
|
|
<-quit
|
|
|
|
<-lnquit
|
|
|
|
}
|
2012-11-23 23:15:26 -07:00
|
|
|
|
|
|
|
// neverEnding is an inexhaustible reader: every Read fills the buffer
// entirely with copies of the underlying byte and never returns EOF.
type neverEnding byte

// Read fills p with b and reports len(p) bytes read with a nil error.
func (b neverEnding) Read(p []byte) (n int, err error) {
	c := byte(b)
	for i := 0; i < len(p); i++ {
		p[i] = c
	}
	return len(p), nil
}
|
|
|
|
|
|
|
|
// TestVariousDeadlines1Proc runs the deadline matrix with GOMAXPROCS=1.
func TestVariousDeadlines1Proc(t *testing.T) {
	testVariousDeadlines(t, 1)
}
|
|
|
|
|
|
|
|
// TestVariousDeadlines4Proc runs the deadline matrix with GOMAXPROCS=4.
func TestVariousDeadlines4Proc(t *testing.T) {
	testVariousDeadlines(t, 4)
}
|
|
|
|
|
|
|
|
// testVariousDeadlines connects clients with deadlines ranging from
// nanoseconds to a second against a server that writes as fast as it
// can, and checks that each client read ends with a timeout instead of
// hanging.
func testVariousDeadlines(t *testing.T, maxProcs int) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("skipping test on %q", runtime.GOOS)
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))
	ln := newLocalListener(t)
	defer ln.Close()
	// Buffered so the accept loop can report one error and exit even
	// when nobody is receiving yet.
	acceptc := make(chan error, 1)

	// The server, with no timeouts of its own, sending bytes to clients
	// as fast as it can.
	servec := make(chan copyRes)
	go func() {
		for {
			c, err := ln.Accept()
			if err != nil {
				acceptc <- err
				return
			}
			go func() {
				t0 := time.Now()
				n, err := io.Copy(c, neverEnding('a'))
				d := time.Since(t0)
				c.Close()
				servec <- copyRes{n, err, d}
			}()
		}
	}()

	for _, timeout := range []time.Duration{
		1 * time.Nanosecond,
		2 * time.Nanosecond,
		5 * time.Nanosecond,
		50 * time.Nanosecond,
		100 * time.Nanosecond,
		200 * time.Nanosecond,
		500 * time.Nanosecond,
		750 * time.Nanosecond,
		1 * time.Microsecond,
		5 * time.Microsecond,
		25 * time.Microsecond,
		250 * time.Microsecond,
		500 * time.Microsecond,
		1 * time.Millisecond,
		5 * time.Millisecond,
		100 * time.Millisecond,
		250 * time.Millisecond,
		500 * time.Millisecond,
		1 * time.Second,
	} {
		numRuns := 3
		if testing.Short() {
			numRuns = 1
			// Longer deadlines would dominate short-mode runtime.
			if timeout > 500*time.Microsecond {
				continue
			}
		}
		for run := 0; run < numRuns; run++ {
			name := fmt.Sprintf("%v run %d/%d", timeout, run+1, numRuns)
			t.Log(name)

			c, err := Dial("tcp", ln.Addr().String())
			if err != nil {
				t.Fatalf("Dial: %v", err)
			}
			clientc := make(chan copyRes)
			// The client drains the connection until its deadline fires.
			go func() {
				t0 := time.Now()
				c.SetDeadline(t0.Add(timeout))
				n, err := io.Copy(ioutil.Discard, c)
				d := time.Since(t0)
				c.Close()
				clientc <- copyRes{n, err, d}
			}()

			// Windows builders are slower; give them more slack.
			tooLong := 2 * time.Second
			if runtime.GOOS == "windows" {
				tooLong = 5 * time.Second
			}
			select {
			case res := <-clientc:
				if isTimeout(res.err) {
					t.Logf("for %v, good client timeout after %v, reading %d bytes", name, res.d, res.n)
				} else {
					t.Fatalf("for %v: client Copy = %d, %v (want timeout)", name, res.n, res.err)
				}
			case <-time.After(tooLong):
				t.Fatalf("for %v: timeout (%v) waiting for client to timeout (%v) reading", name, tooLong, timeout)
			}

			// The server side must also finish (its write fails once
			// the client closes).
			select {
			case res := <-servec:
				t.Logf("for %v: server in %v wrote %d, %v", name, res.d, res.n, res.err)
			case err := <-acceptc:
				t.Fatalf("for %v: server Accept = %v", name, err)
			case <-time.After(tooLong):
				t.Fatalf("for %v, timeout waiting for server to finish writing", name)
			}
		}
	}
}
|
|
|
|
|
|
|
|
// TestReadDeadlineDataAvailable tests that read deadlines work, even
|
|
|
|
// if there's data ready to be read.
|
|
|
|
func TestReadDeadlineDataAvailable(t *testing.T) {
|
2013-02-27 23:18:02 -07:00
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
|
|
|
}
|
|
|
|
|
2012-11-23 23:15:26 -07:00
|
|
|
ln := newLocalListener(t)
|
|
|
|
defer ln.Close()
|
|
|
|
|
|
|
|
servec := make(chan copyRes)
|
2013-03-07 06:03:40 -07:00
|
|
|
const msg = "data client shouldn't read, even though it'll be waiting"
|
2012-11-23 23:15:26 -07:00
|
|
|
go func() {
|
|
|
|
c, err := ln.Accept()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Accept: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
n, err := c.Write([]byte(msg))
|
|
|
|
servec <- copyRes{n: int64(n), err: err}
|
|
|
|
}()
|
|
|
|
|
|
|
|
c, err := Dial("tcp", ln.Addr().String())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Dial: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
if res := <-servec; res.err != nil || res.n != int64(len(msg)) {
|
2013-09-26 18:09:15 -06:00
|
|
|
t.Fatalf("unexpected server Write: n=%d, err=%v; want n=%d, err=nil", res.n, res.err, len(msg))
|
2012-11-23 23:15:26 -07:00
|
|
|
}
|
|
|
|
c.SetReadDeadline(time.Now().Add(-5 * time.Second)) // in the psat.
|
|
|
|
buf := make([]byte, len(msg)/2)
|
|
|
|
n, err := c.Read(buf)
|
|
|
|
if n > 0 || !isTimeout(err) {
|
|
|
|
t.Fatalf("client read = %d (%q) err=%v; want 0, timeout", n, buf[:n], err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestWriteDeadlineBufferAvailable tests that write deadlines work, even
|
|
|
|
// if there's buffer space available to write.
|
|
|
|
func TestWriteDeadlineBufferAvailable(t *testing.T) {
|
2013-02-27 23:18:02 -07:00
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
|
|
|
}
|
|
|
|
|
2012-11-23 23:15:26 -07:00
|
|
|
ln := newLocalListener(t)
|
|
|
|
defer ln.Close()
|
|
|
|
|
|
|
|
servec := make(chan copyRes)
|
|
|
|
go func() {
|
|
|
|
c, err := ln.Accept()
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Accept: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
c.SetWriteDeadline(time.Now().Add(-5 * time.Second)) // in the past
|
|
|
|
n, err := c.Write([]byte{'x'})
|
|
|
|
servec <- copyRes{n: int64(n), err: err}
|
|
|
|
}()
|
|
|
|
|
|
|
|
c, err := Dial("tcp", ln.Addr().String())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Dial: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
res := <-servec
|
|
|
|
if res.n != 0 {
|
|
|
|
t.Errorf("Write = %d; want 0", res.n)
|
|
|
|
}
|
|
|
|
if !isTimeout(res.err) {
|
|
|
|
t.Errorf("Write error = %v; want timeout", res.err)
|
|
|
|
}
|
|
|
|
}
|
2012-11-26 11:28:39 -07:00
|
|
|
|
2013-03-07 06:03:40 -07:00
|
|
|
// TestAcceptDeadlineConnectionAvailable tests that accept deadlines work, even
|
|
|
|
// if there's incoming connections available.
|
|
|
|
func TestAcceptDeadlineConnectionAvailable(t *testing.T) {
|
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
|
|
|
}
|
|
|
|
|
|
|
|
ln := newLocalListener(t).(*TCPListener)
|
|
|
|
defer ln.Close()
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
c, err := Dial("tcp", ln.Addr().String())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Dial: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
var buf [1]byte
|
|
|
|
c.Read(buf[:]) // block until the connection or listener is closed
|
|
|
|
}()
|
|
|
|
time.Sleep(10 * time.Millisecond)
|
|
|
|
ln.SetDeadline(time.Now().Add(-5 * time.Second)) // in the past
|
|
|
|
c, err := ln.Accept()
|
|
|
|
if err == nil {
|
|
|
|
defer c.Close()
|
|
|
|
}
|
|
|
|
if !isTimeout(err) {
|
|
|
|
t.Fatalf("Accept: got %v; want timeout", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// TestConnectDeadlineInThePast tests that connect deadlines work, even
|
|
|
|
// if the connection can be established w/o blocking.
|
|
|
|
func TestConnectDeadlineInThePast(t *testing.T) {
|
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
|
|
|
}
|
|
|
|
|
|
|
|
ln := newLocalListener(t).(*TCPListener)
|
|
|
|
defer ln.Close()
|
|
|
|
|
|
|
|
go func() {
|
|
|
|
c, err := ln.Accept()
|
|
|
|
if err == nil {
|
|
|
|
defer c.Close()
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
time.Sleep(10 * time.Millisecond)
|
|
|
|
c, err := DialTimeout("tcp", ln.Addr().String(), -5*time.Second) // in the past
|
|
|
|
if err == nil {
|
|
|
|
defer c.Close()
|
|
|
|
}
|
|
|
|
if !isTimeout(err) {
|
|
|
|
t.Fatalf("DialTimeout: got %v; want timeout", err)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-11-26 11:28:39 -07:00
|
|
|
// TestProlongTimeout tests concurrent deadline modification.
|
|
|
|
// Known to cause data races in the past.
|
|
|
|
func TestProlongTimeout(t *testing.T) {
|
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
2013-01-23 23:32:10 -07:00
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
2012-11-26 11:28:39 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
ln := newLocalListener(t)
|
|
|
|
defer ln.Close()
|
2012-11-27 01:18:54 -07:00
|
|
|
connected := make(chan bool)
|
2012-11-26 11:28:39 -07:00
|
|
|
go func() {
|
|
|
|
s, err := ln.Accept()
|
2012-11-27 01:18:54 -07:00
|
|
|
connected <- true
|
2012-11-26 11:28:39 -07:00
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("ln.Accept: %v", err)
|
|
|
|
}
|
|
|
|
defer s.Close()
|
|
|
|
s.SetDeadline(time.Now().Add(time.Hour))
|
|
|
|
go func() {
|
|
|
|
var buf [4096]byte
|
|
|
|
for {
|
|
|
|
_, err := s.Write(buf[:])
|
|
|
|
if err != nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
s.SetDeadline(time.Now().Add(time.Hour))
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
buf := make([]byte, 1)
|
|
|
|
for {
|
|
|
|
_, err := s.Read(buf)
|
|
|
|
if err != nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
s.SetDeadline(time.Now().Add(time.Hour))
|
|
|
|
}
|
|
|
|
}()
|
|
|
|
c, err := Dial("tcp", ln.Addr().String())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("DialTCP: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
2012-11-27 01:18:54 -07:00
|
|
|
<-connected
|
2012-11-26 11:28:39 -07:00
|
|
|
for i := 0; i < 1024; i++ {
|
|
|
|
var buf [1]byte
|
|
|
|
c.Write(buf[:])
|
|
|
|
}
|
|
|
|
}
|
2013-08-13 02:55:57 -06:00
|
|
|
|
|
|
|
func TestDeadlineRace(t *testing.T) {
|
|
|
|
switch runtime.GOOS {
|
|
|
|
case "plan9":
|
|
|
|
t.Skipf("skipping test on %q", runtime.GOOS)
|
|
|
|
}
|
|
|
|
|
2013-08-14 11:20:11 -06:00
|
|
|
N := 1000
|
|
|
|
if testing.Short() {
|
|
|
|
N = 50
|
|
|
|
}
|
2013-08-13 02:55:57 -06:00
|
|
|
defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
|
|
|
|
ln := newLocalListener(t)
|
|
|
|
defer ln.Close()
|
|
|
|
c, err := Dial("tcp", ln.Addr().String())
|
|
|
|
if err != nil {
|
|
|
|
t.Fatalf("Dial: %v", err)
|
|
|
|
}
|
|
|
|
defer c.Close()
|
|
|
|
done := make(chan bool)
|
|
|
|
go func() {
|
|
|
|
t := time.NewTicker(2 * time.Microsecond).C
|
2013-08-14 11:20:11 -06:00
|
|
|
for i := 0; i < N; i++ {
|
2013-08-13 02:55:57 -06:00
|
|
|
if err := c.SetDeadline(time.Now().Add(2 * time.Microsecond)); err != nil {
|
|
|
|
break
|
|
|
|
}
|
|
|
|
<-t
|
|
|
|
}
|
|
|
|
done <- true
|
|
|
|
}()
|
|
|
|
var buf [1]byte
|
2013-08-14 11:20:11 -06:00
|
|
|
for i := 0; i < N; i++ {
|
2013-08-13 02:55:57 -06:00
|
|
|
c.Read(buf[:]) // ignore possible timeout errors
|
|
|
|
}
|
|
|
|
c.Close()
|
|
|
|
<-done
|
|
|
|
}
|