// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !js

package net

import (
	"errors"
	"fmt"
	"internal/testenv"
	"io"
	"net/internal/socktest"
	"os"
	"runtime"
	"testing"
	"time"
)

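// TestCloseRead half-closes the read side of a stream connection with
// CloseRead and checks that a subsequent Read fails.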
func TestCloseRead(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	for _, network := range []string{"tcp", "unix", "unixpacket"} {
		if !testableNetwork(network) {
			t.Logf("skipping %s test", network)
			continue
		}

		ln, err := newLocalListener(network)
		if err != nil {
			t.Fatal(err)
		}
		switch network {
		case "unix", "unixpacket":
			defer os.Remove(ln.Addr().String())
		}
		defer ln.Close()

		c, err := Dial(ln.Addr().Network(), ln.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		switch network {
		case "unix", "unixpacket":
			defer os.Remove(c.LocalAddr().String())
		}
		defer c.Close()

		switch c := c.(type) {
		case *TCPConn:
			err = c.CloseRead()
		case *UnixConn:
			err = c.CloseRead()
		}
		if err != nil {
			if perr := parseCloseError(err, true); perr != nil {
				t.Error(perr)
			}
			t.Fatal(err)
		}
		var b [1]byte
		n, err := c.Read(b[:])
		if n != 0 || err == nil {
			t.Fatalf("got (%d, %v); want (0, error)", n, err)
		}
	}
}

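// TestCloseWrite half-closes the write side with CloseWrite on both peers
// and checks that each peer reads io.EOF and that writes after CloseWrite fail.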
func TestCloseWrite(t *testing.T) {
	switch runtime.GOOS {
	case "nacl", "plan9":
		t.Skipf("not supported on %s", runtime.GOOS)
	}

	handler := func(ls *localServer, ln Listener) {
		c, err := ln.Accept()
		if err != nil {
			t.Error(err)
			return
		}
		defer c.Close()

		var b [1]byte
		n, err := c.Read(b[:])
		if n != 0 || err != io.EOF {
			t.Errorf("got (%d, %v); want (0, io.EOF)", n, err)
			return
		}
		switch c := c.(type) {
		case *TCPConn:
			err = c.CloseWrite()
		case *UnixConn:
			err = c.CloseWrite()
		}
		if err != nil {
			if perr := parseCloseError(err, true); perr != nil {
				t.Error(perr)
			}
			t.Error(err)
			return
		}
		n, err = c.Write(b[:])
		if err == nil {
			t.Errorf("got (%d, %v); want (any, error)", n, err)
			return
		}
	}

	for _, network := range []string{"tcp", "unix", "unixpacket"} {
		if !testableNetwork(network) {
			t.Logf("skipping %s test", network)
			continue
		}

		ls, err := newLocalServer(network)
		if err != nil {
			t.Fatal(err)
		}
		defer ls.teardown()
		if err := ls.buildup(handler); err != nil {
			t.Fatal(err)
		}

		c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		switch network {
		case "unix", "unixpacket":
			defer os.Remove(c.LocalAddr().String())
		}
		defer c.Close()

		switch c := c.(type) {
		case *TCPConn:
			err = c.CloseWrite()
		case *UnixConn:
			err = c.CloseWrite()
		}
		if err != nil {
			if perr := parseCloseError(err, true); perr != nil {
				t.Error(perr)
			}
			t.Fatal(err)
		}
		var b [1]byte
		n, err := c.Read(b[:])
		if n != 0 || err != io.EOF {
			t.Fatalf("got (%d, %v); want (0, io.EOF)", n, err)
		}
		n, err = c.Write(b[:])
		if err == nil {
			t.Fatalf("got (%d, %v); want (any, error)", n, err)
		}
	}
}

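// TestConnClose closes a dialed connection and checks that a subsequent
// Read fails.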
func TestConnClose(t *testing.T) {
	for _, network := range []string{"tcp", "unix", "unixpacket"} {
		if !testableNetwork(network) {
			t.Logf("skipping %s test", network)
			continue
		}

		ln, err := newLocalListener(network)
		if err != nil {
			t.Fatal(err)
		}
		switch network {
		case "unix", "unixpacket":
			defer os.Remove(ln.Addr().String())
		}
		defer ln.Close()

		c, err := Dial(ln.Addr().Network(), ln.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		switch network {
		case "unix", "unixpacket":
			defer os.Remove(c.LocalAddr().String())
		}
		defer c.Close()

		if err := c.Close(); err != nil {
			if perr := parseCloseError(err, false); perr != nil {
				t.Error(perr)
			}
			t.Fatal(err)
		}
		var b [1]byte
		n, err := c.Read(b[:])
		if n != 0 || err == nil {
			t.Fatalf("got (%d, %v); want (0, error)", n, err)
		}
	}
}

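// TestListenerClose closes a listener and checks that a subsequent Accept
// fails and, for TCP, that dialing the closed listener's address fails too.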
func TestListenerClose(t *testing.T) {
	for _, network := range []string{"tcp", "unix", "unixpacket"} {
		if !testableNetwork(network) {
			t.Logf("skipping %s test", network)
			continue
		}

		ln, err := newLocalListener(network)
		if err != nil {
			t.Fatal(err)
		}
		switch network {
		case "unix", "unixpacket":
			defer os.Remove(ln.Addr().String())
		}

		dst := ln.Addr().String()
		if err := ln.Close(); err != nil {
			if perr := parseCloseError(err, false); perr != nil {
				t.Error(perr)
			}
			t.Fatal(err)
		}
		c, err := ln.Accept()
		if err == nil {
			c.Close()
			t.Fatal("should fail")
		}

		if network == "tcp" {
			// We will have two TCP FSMs inside the
			// kernel here. There's no guarantee that a
			// signal coming from the far end FSM will be
			// delivered immediately to the near end FSM,
			// especially on platforms that allow multiple
			// consumer threads to pull pending established
			// connections at the same time by enabling the
			// SO_REUSEPORT option, such as Linux and
			// DragonFly BSD. So we need to give some time
			// quantum to the kernel.
			//
			// Note that net.inet.tcp.reuseport_ext=1 by
			// default on DragonFly BSD.
			time.Sleep(time.Millisecond)

			cc, err := Dial("tcp", dst)
			if err == nil {
				t.Error("Dial to closed TCP listener succeeded.")
				cc.Close()
			}
		}
	}
}

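// TestPacketConnClose closes a packet connection and checks that a
// subsequent ReadFrom fails.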
func TestPacketConnClose(t *testing.T) {
	for _, network := range []string{"udp", "unixgram"} {
		if !testableNetwork(network) {
			t.Logf("skipping %s test", network)
			continue
		}

		c, err := newLocalPacketListener(network)
		if err != nil {
			t.Fatal(err)
		}
		switch network {
		case "unixgram":
			defer os.Remove(c.LocalAddr().String())
		}
		defer c.Close()

		if err := c.Close(); err != nil {
			if perr := parseCloseError(err, false); perr != nil {
				t.Error(perr)
			}
			t.Fatal(err)
		}
		var b [1]byte
		n, _, err := c.ReadFrom(b[:])
		if n != 0 || err == nil {
			t.Fatalf("got (%d, %v); want (0, error)", n, err)
		}
	}
}

// nacl was previously failing to reuse an address.
func TestListenCloseListen(t *testing.T) {
	const maxTries = 10
	for tries := 0; tries < maxTries; tries++ {
		ln, err := newLocalListener("tcp")
		if err != nil {
			t.Fatal(err)
		}
		addr := ln.Addr().String()
		if err := ln.Close(); err != nil {
			if perr := parseCloseError(err, false); perr != nil {
				t.Error(perr)
			}
			t.Fatal(err)
		}
		ln, err = Listen("tcp", addr)
		if err == nil {
			// Success. nacl couldn't do this before.
			ln.Close()
			return
		}
		t.Errorf("failed on try %d/%d: %v", tries+1, maxTries, err)
	}
	t.Fatalf("failed to listen/close/listen on same address after %d tries", maxTries)
}

// See golang.org/issue/6163, golang.org/issue/6987.
func TestAcceptIgnoreAbortedConnRequest(t *testing.T) {
	switch runtime.GOOS {
	case "plan9":
		t.Skipf("%s does not have full support of socktest", runtime.GOOS)
	}

	syserr := make(chan error)
	go func() {
		defer close(syserr)
		for _, err := range abortedConnRequestErrors {
			syserr <- err
		}
	}()
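	// Feed each simulated aborted-connection error into the accept path via
	// the socktest filter; once the channel is drained, accepts proceed
	// normally and Accept is expected to have retried past the injected errors.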
	sw.Set(socktest.FilterAccept, func(so *socktest.Status) (socktest.AfterFilter, error) {
		if err, ok := <-syserr; ok {
			return nil, err
		}
		return nil, nil
	})
	defer sw.Set(socktest.FilterAccept, nil)

	operr := make(chan error, 1)
	handler := func(ls *localServer, ln Listener) {
		defer close(operr)
		c, err := ln.Accept()
		if err != nil {
			if perr := parseAcceptError(err); perr != nil {
				operr <- perr
			}
			operr <- err
			return
		}
		c.Close()
	}
	ls, err := newLocalServer("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ls.teardown()
	if err := ls.buildup(handler); err != nil {
		t.Fatal(err)
	}

	c, err := Dial(ls.Listener.Addr().Network(), ls.Listener.Addr().String())
	if err != nil {
		t.Fatal(err)
	}
	c.Close()

	for err := range operr {
		t.Error(err)
	}
}

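// TestZeroByteRead checks that a zero-byte Read returns (0, nil) on both
// ends of a stream connection.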
func TestZeroByteRead(t *testing.T) {
	for _, network := range []string{"tcp", "unix", "unixpacket"} {
		if !testableNetwork(network) {
			t.Logf("skipping %s test", network)
			continue
		}

		ln, err := newLocalListener(network)
		if err != nil {
			t.Fatal(err)
		}
		connc := make(chan Conn, 1)
		go func() {
			defer ln.Close()
			c, err := ln.Accept()
			if err != nil {
				t.Error(err)
			}
			connc <- c // might be nil
		}()
		c, err := Dial(network, ln.Addr().String())
		if err != nil {
			t.Fatal(err)
		}
		defer c.Close()
		sc := <-connc
		if sc == nil {
			continue
		}
		defer sc.Close()

		if runtime.GOOS == "windows" {
			// A zero byte read on Windows caused a wait for readability first.
			// Rather than change that behavior, satisfy it in this test.
			// See Issue 15735.
			go io.WriteString(sc, "a")
		}

		n, err := c.Read(nil)
		if n != 0 || err != nil {
			t.Errorf("%s: zero byte client read = %v, %v; want 0, nil", network, n, err)
		}

		if runtime.GOOS == "windows" {
			// Same as comment above.
			go io.WriteString(c, "a")
		}
		n, err = sc.Read(nil)
		if n != 0 || err != nil {
			t.Errorf("%s: zero byte server read = %v, %v; want 0, nil", network, n, err)
		}
	}
}

// withTCPConnPair sets up a TCP connection between two peers, then
// runs peer1 and peer2 concurrently. withTCPConnPair returns when
// both have completed.
func withTCPConnPair(t *testing.T, peer1, peer2 func(c *TCPConn) error) {
	ln, err := newLocalListener("tcp")
	if err != nil {
		t.Fatal(err)
	}
	defer ln.Close()
	errc := make(chan error, 2)
	go func() {
		c1, err := ln.Accept()
		if err != nil {
			errc <- err
			return
		}
		defer c1.Close()
		errc <- peer1(c1.(*TCPConn))
	}()
	go func() {
		c2, err := Dial("tcp", ln.Addr().String())
		if err != nil {
			errc <- err
			return
		}
		defer c2.Close()
		errc <- peer2(c2.(*TCPConn))
	}()
	for i := 0; i < 2; i++ {
		if err := <-errc; err != nil {
			t.Fatal(err)
		}
	}
}

// Tests that a blocked Read is interrupted by a concurrent SetReadDeadline
// modifying that Conn's read deadline to the past.
// See golang.org/cl/30164 which documented this. The net/http package
// depends on this.
func TestReadTimeoutUnblocksRead(t *testing.T) {
	serverDone := make(chan struct{})
	server := func(cs *TCPConn) error {
		defer close(serverDone)
		errc := make(chan error, 1)
		go func() {
			defer close(errc)
			go func() {
				// TODO: find a better way to wait
				// until we're blocked in the cs.Read
				// call below. Sleep is lame.
				time.Sleep(100 * time.Millisecond)

				// Interrupt the upcoming Read, unblocking it:
				cs.SetReadDeadline(time.Unix(123, 0)) // time in the past
			}()
			var buf [1]byte
			n, err := cs.Read(buf[:1])
			if n != 0 || err == nil {
				errc <- fmt.Errorf("Read = %v, %v; want 0, non-nil", n, err)
			}
		}()
		select {
		case err := <-errc:
			return err
		case <-time.After(5 * time.Second):
			buf := make([]byte, 2<<20)
			buf = buf[:runtime.Stack(buf, true)]
			println("Stacks at timeout:\n", string(buf))
			return errors.New("timeout waiting for Read to finish")
		}

	}
	// Do nothing in the client. Never write. Just wait for the
	// server's half to be done.
	client := func(*TCPConn) error {
		<-serverDone
		return nil
	}
	withTCPConnPair(t, client, server)
}

// Issue 17695: verify that a blocked Read is woken up by a Close.
func TestCloseUnblocksRead(t *testing.T) {
	t.Parallel()
	server := func(cs *TCPConn) error {
		// Give the client time to get stuck in a Read:
		time.Sleep(20 * time.Millisecond)
		cs.Close()
		return nil
	}
	client := func(ss *TCPConn) error {
		n, err := ss.Read([]byte{0})
		if n != 0 || err != io.EOF {
			return fmt.Errorf("Read = %v, %v; want 0, EOF", n, err)
		}
		return nil
	}
	withTCPConnPair(t, client, server)
}

// Issue 24808: verify that ECONNRESET is not temporary for read.
func TestNotTemporaryRead(t *testing.T) {
	if runtime.GOOS == "freebsd" {
		testenv.SkipFlaky(t, 25289)
	}
	if runtime.GOOS == "aix" {
		testenv.SkipFlaky(t, 29685)
	}
	t.Parallel()
	server := func(cs *TCPConn) error {
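		// Close with SO_LINGER set to 0 so the close should abort the
		// connection with a RST; the client's pending Read is then expected
		// to fail with ECONNRESET rather than seeing EOF.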
		cs.SetLinger(0)
		// Give the client time to get stuck in a Read.
		time.Sleep(50 * time.Millisecond)
		cs.Close()
		return nil
	}
	client := func(ss *TCPConn) error {
		_, err := ss.Read([]byte{0})
		if err == nil {
			return errors.New("Read succeeded unexpectedly")
		} else if err == io.EOF {
			// This happens on NaCl and Plan 9.
			return nil
		} else if ne, ok := err.(Error); !ok {
			return fmt.Errorf("unexpected error %v", err)
		} else if ne.Temporary() {
			return fmt.Errorf("unexpected temporary error %v", err)
		}
		return nil
	}
	withTCPConnPair(t, client, server)
}