// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package net

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/internal/socktest"
	"runtime"
	"sync"
	"testing"
	"time"
)
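
// In each dialTimeoutTests case, timeout is the Dialer.Timeout, delta offsets
// the Dialer.Deadline from time.Now, guard is how long the test hooks stall
// the connect attempt, and max bounds how long Dial may take before the test
// fails.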
var dialTimeoutTests = []struct {
	timeout time.Duration
	delta   time.Duration // for deadline

	guard time.Duration
	max   time.Duration
}{
	// Tests that dial timeouts and deadlines in the past work.
	{-5 * time.Second, 0, -5 * time.Second, 100 * time.Millisecond},
	{0, -5 * time.Second, -5 * time.Second, 100 * time.Millisecond},
	{-5 * time.Second, 5 * time.Second, -5 * time.Second, 100 * time.Millisecond}, // timeout over deadline

	{50 * time.Millisecond, 0, 100 * time.Millisecond, time.Second},
	{0, 50 * time.Millisecond, 100 * time.Millisecond, time.Second},
	{50 * time.Millisecond, 5 * time.Second, 100 * time.Millisecond, time.Second}, // timeout over deadline
}

func TestDialTimeout(t *testing.T) {
	origTestHookDialChannel := testHookDialChannel
	defer func() { testHookDialChannel = origTestHookDialChannel }()
	defer sw.Set(socktest.FilterConnect, nil)

	for i, tt := range dialTimeoutTests {
		switch runtime.GOOS {
		case "plan9", "windows":
			testHookDialChannel = func() { time.Sleep(tt.guard) }
			if runtime.GOOS == "plan9" {
				break
			}
			fallthrough
		default:
			sw.Set(socktest.FilterConnect, func(so *socktest.Status) (socktest.AfterFilter, error) {
				time.Sleep(tt.guard)
				return nil, errTimedout
			})
		}

		ch := make(chan error)
		d := Dialer{Timeout: tt.timeout}
		if tt.delta != 0 {
			d.Deadline = time.Now().Add(tt.delta)
		}
		max := time.NewTimer(tt.max)
		defer max.Stop()
		go func() {
			// This dial never starts to send any TCP SYN
			// segment because of the above socket filter and
			// test hook.
			c, err := d.Dial("tcp", "127.0.0.1:0")
			if err == nil {
				err = fmt.Errorf("unexpectedly established: tcp:%s->%s", c.LocalAddr(), c.RemoteAddr())
				c.Close()
			}
			ch <- err
		}()

		select {
		case <-max.C:
			t.Fatalf("#%d: Dial didn't return in an expected time", i)
		case err := <-ch:
			if perr := parseDialError(err); perr != nil {
				t.Errorf("#%d: %v", i, perr)
			}
			if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
				t.Fatalf("#%d: %v", i, err)
			}
		}
	}
}
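
// The two xerrs entries describe the expected errors in transition: the
// error expected on the first call after the deadline is set, then the error
// expected once the deadline has passed. A nil first entry means the call may
// still succeed before the deadline expires.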
var acceptTimeoutTests = []struct {
	timeout time.Duration
	xerrs   [2]error // expected errors in transition
}{
	// Tests that accept deadlines in the past work, even if
	// there are incoming connections available.
	{-5 * time.Second, [2]error{errTimeout, errTimeout}},

	{50 * time.Millisecond, [2]error{nil, errTimeout}},
}

func TestAcceptTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	for i, tt := range acceptTimeoutTests {
		if tt.timeout < 0 {
			go func() {
				c, err := Dial(ln.Addr().Network(), ln.Addr().String())
				if err != nil {
					t.Error(err)
					return
				}
				var b [1]byte
				c.Read(b[:])
				c.Close()
			}()
		}

		if err := ln.(*TCPListener).SetDeadline(time.Now().Add(tt.timeout)); err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		for j, xerr := range tt.xerrs {
			for {
				c, err := ln.Accept()
				if xerr != nil {
					if perr := parseAcceptError(err); perr != nil {
						t.Errorf("#%d/%d: %v", i, j, perr)
					}
					if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
						t.Fatalf("#%d/%d: %v", i, j, err)
					}
				}
				if err == nil {
					c.Close()
					time.Sleep(tt.timeout / 3)
					continue
				}
				break
			}
		}
	}
}
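
// TestAcceptTimeoutMustReturn verifies that a freshly armed deadline (set
// after clearing any previous one) makes Accept return with a timeout error
// instead of blocking forever.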
func TestAcceptTimeoutMustReturn(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	max := time.NewTimer(time.Second)
	defer max.Stop()
	ch := make(chan error)
	go func() {
		if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil {
			t.Error(err)
		}
		if err := ln.(*TCPListener).SetDeadline(time.Now().Add(10 * time.Millisecond)); err != nil {
			t.Error(err)
		}
		c, err := ln.Accept()
		if err == nil {
			c.Close()
		}
		ch <- err
	}()

	select {
	case <-max.C:
		ln.Close()
		<-ch // wait for tester goroutine to stop
		t.Fatal("Accept didn't return in an expected time")
	case err := <-ch:
		if perr := parseAcceptError(err); perr != nil {
			t.Error(perr)
		}
		if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
			t.Fatal(err)
		}
	}
}
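
// TestAcceptTimeoutMustNotReturn verifies that clearing an already-expired
// deadline really removes it: Accept must keep blocking rather than report a
// stale timeout.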
func TestAcceptTimeoutMustNotReturn(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	max := time.NewTimer(100 * time.Millisecond)
	defer max.Stop()
	ch := make(chan error)
	go func() {
		if err := ln.(*TCPListener).SetDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := ln.(*TCPListener).SetDeadline(noDeadline); err != nil {
			t.Error(err)
		}
		_, err := ln.Accept()
		ch <- err
	}()

	select {
	case err := <-ch:
		if perr := parseAcceptError(err); perr != nil {
			t.Error(perr)
		}
		t.Fatalf("expected Accept to not return, but it returned with %v", err)
	case <-max.C:
		ln.Close()
		<-ch // wait for tester goroutine to stop
	}
}

var readTimeoutTests = []struct {
	timeout time.Duration
	xerrs   [2]error // expected errors in transition
}{
	// Tests that read deadlines work, even if there's data ready
	// to be read.
	{-5 * time.Second, [2]error{errTimeout, errTimeout}},

	{50 * time.Millisecond, [2]error{nil, errTimeout}},
}

func TestReadTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	handler := func(ls *localServer, ln Listener) {
		c, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		c.Write([]byte("READ TIMEOUT TEST"))
		defer c.Close()
	}
	ls, err := newLocalServer("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	for i, tt := range readTimeoutTests {
		if err := c.SetReadDeadline(time.Now().Add(tt.timeout)); err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		var b [1]byte
		for j, xerr := range tt.xerrs {
			for {
				n, err := c.Read(b[:])
				if xerr != nil {
					if perr := parseReadError(err); perr != nil {
						t.Errorf("#%d/%d: %v", i, j, perr)
					}
					if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
						t.Fatalf("#%d/%d: %v", i, j, err)
					}
				}
				if err == nil {
					time.Sleep(tt.timeout / 3)
					continue
				}
				if n != 0 {
					t.Fatalf("#%d/%d: read %d; want 0", i, j, n)
				}
				break
			}
		}
	}
}
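
// TestReadTimeoutMustNotReturn verifies that clearing the read deadline
// (while leaving expired overall and write deadlines in place) keeps a
// blocked Read from returning with a stale timeout.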
func TestReadTimeoutMustNotReturn(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	max := time.NewTimer(100 * time.Millisecond)
	defer max.Stop()
	ch := make(chan error)
	go func() {
		if err := c.SetDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := c.SetWriteDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := c.SetReadDeadline(noDeadline); err != nil {
			t.Error(err)
		}
		var b [1]byte
		_, err := c.Read(b[:])
		ch <- err
	}()

	select {
	case err := <-ch:
		if perr := parseReadError(err); perr != nil {
			t.Error(perr)
		}
		t.Fatalf("expected Read to not return, but it returned with %v", err)
	case <-max.C:
		c.Close()
		err := <-ch // wait for tester goroutine to stop
		if perr := parseReadError(err); perr != nil {
			t.Error(perr)
		}
		if err == io.EOF && runtime.GOOS == "nacl" { // see golang.org/issue/8044
			return
		}
		if nerr, ok := err.(Error); !ok || nerr.Timeout() || nerr.Temporary() {
			t.Fatal(err)
		}
	}
}

var readFromTimeoutTests = []struct {
	timeout time.Duration
	xerrs   [2]error // expected errors in transition
}{
	// Tests that read deadlines work, even if there's data ready
	// to be read.
	{-5 * time.Second, [2]error{errTimeout, errTimeout}},

	{50 * time.Millisecond, [2]error{nil, errTimeout}},
}

func TestReadFromTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "nacl", "plan9":
		t.Skipf("not supported on %s", runtime.GOOS) // see golang.org/issue/8916
	}

	ch := make(chan Addr)
	defer close(ch)
	handler := func(ls *localPacketServer, c PacketConn) {
		if dst, ok := <-ch; ok {
			c.WriteTo([]byte("READFROM TIMEOUT TEST"), dst)
		}
	}
	ls, err := newLocalPacketServer("udp")
	if err != nil {
		t.Fatal(err)
	}
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	host, _, err := SplitHostPort(ls.PacketConn.LocalAddr().String())
	if err != nil {
		t.Fatal(err)
	}
	c, err := ListenPacket(ls.PacketConn.LocalAddr().Network(), JoinHostPort(host, "0"))
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()
	ch <- c.LocalAddr()

	for i, tt := range readFromTimeoutTests {
		if err := c.SetReadDeadline(time.Now().Add(tt.timeout)); err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		var b [1]byte
		for j, xerr := range tt.xerrs {
			for {
				n, _, err := c.ReadFrom(b[:])
				if xerr != nil {
					if perr := parseReadError(err); perr != nil {
						t.Errorf("#%d/%d: %v", i, j, perr)
					}
					if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
						t.Fatalf("#%d/%d: %v", i, j, err)
					}
				}
				if err == nil {
					time.Sleep(tt.timeout / 3)
					continue
				}
				if n != 0 {
					t.Fatalf("#%d/%d: read %d; want 0", i, j, n)
				}
				break
			}
		}
	}
}

var writeTimeoutTests = []struct {
	timeout time.Duration
	xerrs   [2]error // expected errors in transition
}{
	// Tests that write deadlines work, even if there's buffer
	// space available to write.
	{-5 * time.Second, [2]error{errTimeout, errTimeout}},

	{10 * time.Millisecond, [2]error{nil, errTimeout}},
}

func TestWriteTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	for i, tt := range writeTimeoutTests {
		c, err := Dial(ln.Addr().Network(), ln.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		defer c.Close()

		if err := c.SetWriteDeadline(time.Now().Add(tt.timeout)); err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		for j, xerr := range tt.xerrs {
			for {
				n, err := c.Write([]byte("WRITE TIMEOUT TEST"))
				if xerr != nil {
					if perr := parseWriteError(err); perr != nil {
						t.Errorf("#%d/%d: %v", i, j, perr)
					}
					if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
						t.Fatalf("#%d/%d: %v", i, j, err)
					}
				}
				if err == nil {
					time.Sleep(tt.timeout / 3)
					continue
				}
				if n != 0 {
					t.Fatalf("#%d/%d: wrote %d; want 0", i, j, n)
				}
				break
			}
		}
	}
}
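
// TestWriteTimeoutMustNotReturn verifies that clearing the write deadline
// (while leaving expired overall and read deadlines in place) keeps a
// blocked Write from returning with a stale timeout.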
func TestWriteTimeoutMustNotReturn(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	max := time.NewTimer(100 * time.Millisecond)
	defer max.Stop()
	ch := make(chan error)
	go func() {
		if err := c.SetDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := c.SetReadDeadline(time.Now().Add(-5 * time.Second)); err != nil {
			t.Error(err)
		}
		if err := c.SetWriteDeadline(noDeadline); err != nil {
			t.Error(err)
		}
		var b [1]byte
		for {
			if _, err := c.Write(b[:]); err != nil {
				ch <- err
				break
			}
		}
	}()

	select {
	case err := <-ch:
		if perr := parseWriteError(err); perr != nil {
			t.Error(perr)
		}
		t.Fatalf("expected Write to not return, but it returned with %v", err)
	case <-max.C:
		c.Close()
		err := <-ch // wait for tester goroutine to stop
		if perr := parseWriteError(err); perr != nil {
			t.Error(perr)
		}
		if nerr, ok := err.(Error); !ok || nerr.Timeout() || nerr.Temporary() {
			t.Fatal(err)
		}
	}
}

var writeToTimeoutTests = []struct {
	timeout time.Duration
	xerrs   [2]error // expected errors in transition
}{
	// Tests that write deadlines work, even if there's buffer
	// space available to write.
	{-5 * time.Second, [2]error{errTimeout, errTimeout}},

	{10 * time.Millisecond, [2]error{nil, errTimeout}},
}

func TestWriteToTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "nacl", "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	c1, err := newLocalPacketListener("udp")
	if err != nil {
		t.Fatal(err)
	}
	defer c1.Close()

	host, _, err := SplitHostPort(c1.LocalAddr().String())
	if err != nil {
		t.Fatal(err)
	}

	for i, tt := range writeToTimeoutTests {
		c2, err := ListenPacket(c1.LocalAddr().Network(), JoinHostPort(host, "0"))
		if err != nil {
			t.Fatal(err)
		}
		defer c2.Close()

		if err := c2.SetWriteDeadline(time.Now().Add(tt.timeout)); err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		for j, xerr := range tt.xerrs {
			for {
				n, err := c2.WriteTo([]byte("WRITETO TIMEOUT TEST"), c1.LocalAddr())
				if xerr != nil {
					if perr := parseWriteError(err); perr != nil {
						t.Errorf("#%d/%d: %v", i, j, perr)
					}
					if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
						t.Fatalf("#%d/%d: %v", i, j, err)
					}
				}
				if err == nil {
					time.Sleep(tt.timeout / 3)
					continue
				}
				if n != 0 {
					t.Fatalf("#%d/%d: wrote %d; want 0", i, j, n)
				}
				break
			}
		}
	}
}
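
// The three fluctuation tests below rely on the timeoutReceiver,
// timeoutPacketReceiver and timeoutTransmitter helpers (defined elsewhere in
// the package's tests), which arm the given deadline and report an error when
// the blocked I/O call returns outside the given minimum/maximum window.
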
func TestReadTimeoutFluctuation(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	max := time.NewTimer(time.Second)
	defer max.Stop()
	ch := make(chan error)
	go timeoutReceiver(c, 100*time.Millisecond, 50*time.Millisecond, 250*time.Millisecond, ch)

	select {
	case <-max.C:
		t.Fatal("Read took over 1s; expected 0.1s")
	case err := <-ch:
		if perr := parseReadError(err); perr != nil {
			t.Error(perr)
		}
		if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
			t.Fatal(err)
		}
	}
}

func TestReadFromTimeoutFluctuation(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	c1, err := newLocalPacketListener("udp")
	if err != nil {
		t.Fatal(err)
	}
	defer c1.Close()

	c2, err := Dial(c1.LocalAddr().Network(), c1.LocalAddr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c2.Close()

	max := time.NewTimer(time.Second)
	defer max.Stop()
	ch := make(chan error)
	go timeoutPacketReceiver(c2.(PacketConn), 100*time.Millisecond, 50*time.Millisecond, 250*time.Millisecond, ch)

	select {
	case <-max.C:
		t.Fatal("ReadFrom took over 1s; expected 0.1s")
	case err := <-ch:
		if perr := parseReadError(err); perr != nil {
			t.Error(perr)
		}
		if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
			t.Fatal(err)
		}
	}
}

func TestWriteTimeoutFluctuation(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	d := time.Second
	if runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") {
		d = 3 * time.Second // see golang.org/issue/10775
	}
	max := time.NewTimer(d)
	defer max.Stop()
	ch := make(chan error)
	go timeoutTransmitter(c, 100*time.Millisecond, 50*time.Millisecond, 250*time.Millisecond, ch)

	select {
	case <-max.C:
		t.Fatalf("Write took over %v; expected 0.1s", d)
	case err := <-ch:
		if perr := parseWriteError(err); perr != nil {
			t.Error(perr)
		}
		if nerr, ok := err.(Error); !ok || !nerr.Timeout() {
			t.Fatal(err)
		}
	}
}

func TestVariousDeadlines1Proc(t *testing.T) {
	testVariousDeadlines(t, 1)
}

func TestVariousDeadlines4Proc(t *testing.T) {
	testVariousDeadlines(t, 4)
}
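
// neverEnding is an io.Reader that fills p with copies of b and never
// reports io.EOF.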
type neverEnding byte

func (b neverEnding) Read(p []byte) (int, error) {
	for i := range p {
		p[i] = byte(b)
	}
	return len(p), nil
}
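
// testVariousDeadlines dials the test server once per timeout value, arms
// that deadline on the client connection, and checks that the client's
// io.Copy fails with a timeout while the server writes as fast as it can.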
func testVariousDeadlines(t *testing.T, maxProcs int) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(maxProcs))

	type result struct {
		n   int64
		err error
		d   time.Duration
	}

	ch := make(chan error, 1)
	pasvch := make(chan result)
	handler := func(ls *localServer, ln Listener) {
		for {
			c, err := ln.Accept()
			if err != nil {
				ch <- err
				return
			}
			// The server, with no timeouts of its own,
			// sends bytes to clients as fast as it can.
			go func() {
				t0 := time.Now()
				n, err := io.Copy(c, neverEnding('a'))
				dt := time.Since(t0)
				c.Close()
				pasvch <- result{n, err, dt}
			}()
		}
	}
	ls, err := newLocalServer("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	for _, timeout := range []time.Duration{
		1 * time.Nanosecond,
		2 * time.Nanosecond,
		5 * time.Nanosecond,
		50 * time.Nanosecond,
		100 * time.Nanosecond,
		200 * time.Nanosecond,
		500 * time.Nanosecond,
		750 * time.Nanosecond,
		1 * time.Microsecond,
		5 * time.Microsecond,
		25 * time.Microsecond,
		250 * time.Microsecond,
		500 * time.Microsecond,
		1 * time.Millisecond,
		5 * time.Millisecond,
		100 * time.Millisecond,
		250 * time.Millisecond,
		500 * time.Millisecond,
		1 * time.Second,
	} {
		numRuns := 3
		if testing.Short() {
			numRuns = 1
			if timeout > 500*time.Microsecond {
				continue
			}
		}
		for run := 0; run < numRuns; run++ {
			name := fmt.Sprintf("%v run %d/%d", timeout, run+1, numRuns)
			t.Log(name)

			c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
			if err != nil {
				t.Fatal(err)
			}

			tooLong := 5 * time.Second
			max := time.NewTimer(tooLong)
			defer max.Stop()
			actvch := make(chan result)
			go func() {
				t0 := time.Now()
				if err := c.SetDeadline(t0.Add(timeout)); err != nil {
					t.Error(err)
				}
				n, err := io.Copy(ioutil.Discard, c)
				dt := time.Since(t0)
				c.Close()
				actvch <- result{n, err, dt}
			}()

			select {
			case res := <-actvch:
				if nerr, ok := res.err.(Error); ok && nerr.Timeout() {
					t.Logf("for %v, good client timeout after %v, reading %d bytes", name, res.d, res.n)
				} else {
					t.Fatalf("for %v, client Copy = %d, %v; want timeout", name, res.n, res.err)
				}
			case <-max.C:
				t.Fatalf("for %v, timeout (%v) waiting for client to timeout (%v) reading", name, tooLong, timeout)
			}

			select {
			case res := <-pasvch:
				t.Logf("for %v, server in %v wrote %d: %v", name, res.d, res.n, res.err)
			case err := <-ch:
				t.Fatalf("for %v, Accept = %v", name, err)
			case <-max.C:
				t.Fatalf("for %v, timeout waiting for server to finish writing", name)
			}
		}
	}
}

// TestReadWriteProlongedTimeout tests concurrent deadline
// modification. Known to cause data races in the past.
func TestReadWriteProlongedTimeout(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	handler := func(ls *localServer, ln Listener) {
		c, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		defer c.Close()

		var wg sync.WaitGroup
		wg.Add(2)
		go func() {
			defer wg.Done()
			var b [1]byte
			for {
				if err := c.SetReadDeadline(time.Now().Add(time.Hour)); err != nil {
					if perr := parseCommonError(err); perr != nil {
						t.Error(perr)
					}
					t.Error(err)
					return
				}
				if _, err := c.Read(b[:]); err != nil {
					if perr := parseReadError(err); perr != nil {
						t.Error(perr)
					}
					return
				}
			}
		}()
		go func() {
			defer wg.Done()
			var b [1]byte
			for {
				if err := c.SetWriteDeadline(time.Now().Add(time.Hour)); err != nil {
					if perr := parseCommonError(err); perr != nil {
						t.Error(perr)
					}
					t.Error(err)
					return
				}
				if _, err := c.Write(b[:]); err != nil {
					if perr := parseWriteError(err); perr != nil {
						t.Error(perr)
					}
					return
				}
			}
		}()
		wg.Wait()
	}
	ls, err := newLocalServer("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	var b [1]byte
	for i := 0; i < 1000; i++ {
		c.Write(b[:])
		c.Read(b[:])
	}
}
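
// TestReadWriteDeadlineRace hammers SetReadDeadline and SetWriteDeadline from
// one goroutine while two others read and write, to give the race detector a
// chance to catch unsynchronized deadline updates.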
func TestReadWriteDeadlineRace(t *testing.T) {
	switch runtime.GOOS {
	case "nacl", "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	N := 1000
	if testing.Short() {
		N = 50
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))

	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()

	c, err := Dial(ln.Addr().Network(), ln.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	defer c.Close()

	var wg sync.WaitGroup
	wg.Add(3)
	go func() {
		defer wg.Done()
		tic := time.NewTicker(2 * time.Microsecond)
		defer tic.Stop()
		for i := 0; i < N; i++ {
			if err := c.SetReadDeadline(time.Now().Add(2 * time.Microsecond)); err != nil {
				if perr := parseCommonError(err); perr != nil {
					t.Error(perr)
				}
				break
			}
			if err := c.SetWriteDeadline(time.Now().Add(2 * time.Microsecond)); err != nil {
				if perr := parseCommonError(err); perr != nil {
					t.Error(perr)
				}
				break
			}
			<-tic.C
		}
	}()
	go func() {
		defer wg.Done()
		var b [1]byte
		for i := 0; i < N; i++ {
			c.Read(b[:]) // ignore possible timeout errors
		}
	}()
	go func() {
		defer wg.Done()
		var b [1]byte
		for i := 0; i < N; i++ {
			c.Write(b[:]) // ignore possible timeout errors
		}
	}()
	wg.Wait() // wait for tester goroutines to stop
}