// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package net

import (
	"errors"
	"os"
	"runtime"
	"sync"
	"syscall"
	"time"
	"unsafe"
)

var (
	initErr error
	ioSync  uint64
)

// The CancelIo Windows API cancels all outstanding IO for a particular
// socket on the current thread only. To overcome that limitation, we run
// a special goroutine, locked to a single OS thread, that both starts
// and cancels IO. This means there are two unavoidable thread switches
// for every IO.
// Some newer versions of Windows have a CancelIoEx API that does not
// have that limitation and can be used from any thread. This package
// uses the CancelIoEx API if present, otherwise it falls back to
// CancelIo.

var (
	canCancelIO                               bool // determines if CancelIoEx API is present
	skipSyncNotif                             bool
	hasLoadSetFileCompletionNotificationModes bool
)
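
// sysInit initializes the Winsock stack (WSAStartup) and records which
// optional Windows APIs (CancelIoEx, GetAddrInfo,
// SetFileCompletionNotificationModes) are available on this system.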
func sysInit() {
	var d syscall.WSAData
	e := syscall.WSAStartup(uint32(0x202), &d)
	if e != nil {
		initErr = os.NewSyscallError("WSAStartup", e)
	}
	canCancelIO = syscall.LoadCancelIoEx() == nil
	if syscall.LoadGetAddrInfo() == nil {
		lookupPort = newLookupPort
		lookupIP = newLookupIP
	}

	hasLoadSetFileCompletionNotificationModes = syscall.LoadSetFileCompletionNotificationModes() == nil
	if hasLoadSetFileCompletionNotificationModes {
		// It's not safe to use FILE_SKIP_COMPLETION_PORT_ON_SUCCESS if non-IFS providers are installed:
		// http://support.microsoft.com/kb/2568167
		skipSyncNotif = true
		protos := [2]int32{syscall.IPPROTO_TCP, 0}
		var buf [32]syscall.WSAProtocolInfo
		len := uint32(unsafe.Sizeof(buf))
		n, err := syscall.WSAEnumProtocols(&protos[0], &buf[0], &len)
		if err != nil {
			skipSyncNotif = false
		} else {
			for i := int32(0); i < n; i++ {
				if buf[i].ServiceFlags1&syscall.XP1_IFS_HANDLES == 0 {
					skipSyncNotif = false
					break
				}
			}
		}
	}
}
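
// canUseConnectEx reports whether the ConnectEx Windows API can be used
// to dial on the given network; ConnectEx only works with
// connection-oriented sockets.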
func canUseConnectEx(net string) bool {
	switch net {
	case "udp", "udp4", "udp6", "ip", "ip4", "ip6":
		// ConnectEx windows API does not support connectionless sockets.
		return false
	}
	return syscall.LoadConnectEx() == nil
}
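
// dial establishes a connection using the supplied dialer. When ConnectEx
// cannot be used for this network, the deadline is enforced by the
// goroutine-racing dialChannel implementation instead.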
func dial(net string, ra Addr, dialer func(time.Time) (Conn, error), deadline time.Time) (Conn, error) {
	if !canUseConnectEx(net) {
		// Use the relatively inefficient goroutine-racing
		// implementation of DialTimeout.
		return dialChannel(net, ra, dialer, deadline)
	}
	return dialer(deadline)
}

// operation contains a superset of the data necessary to perform all
// async IO.
type operation struct {
	// Used by the IOCP interface; it must be the first field
	// of the struct, as our code relies on it.
	o syscall.Overlapped

	// fields used by runtime.netpoll
	runtimeCtx uintptr
	mode       int32
	errno      int32
	qty        uint32

	// fields used only by net package
	fd     *netFD
	errc   chan error
	buf    syscall.WSABuf
	sa     syscall.Sockaddr
	rsa    *syscall.RawSockaddrAny
	rsan   int32
	handle syscall.Handle
	flags  uint32
}
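
// InitBuf points the operation's WSABuf at buf, or at nil when the
// slice is empty, so the kernel can transfer data directly to or from it.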
func (o *operation) InitBuf(buf []byte) {
	o.buf.Len = uint32(len(buf))
	o.buf.Buf = nil
	if len(buf) != 0 {
		o.buf.Buf = &buf[0]
	}
}

// ioSrv executes net IO requests.
type ioSrv struct {
	req chan ioSrvReq
}

type ioSrvReq struct {
	o      *operation
	submit func(o *operation) error // if nil, cancel the operation
}

// ProcessRemoteIO executes submit IO requests on behalf
// of other goroutines, all on a single OS thread, so it can
// cancel them later. Results of all operations are sent
// back to their requesters via the channel supplied in the request.
// It is used only when the CancelIoEx API is unavailable.
func (s *ioSrv) ProcessRemoteIO() {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	for r := range s.req {
		if r.submit != nil {
			r.o.errc <- r.submit(r.o)
		} else {
			r.o.errc <- syscall.CancelIo(r.o.fd.sysfd)
		}
	}
}

// ExecIO executes a single IO operation o. It submits and cancels
// IO in the current thread for systems where the Windows CancelIoEx API
// is available. Alternatively, it passes the request onto
// runtime netpoll and waits for completion or cancels the request.
func (s *ioSrv) ExecIO(o *operation, name string, submit func(o *operation) error) (int, error) {
	fd := o.fd
	// Notify runtime netpoll about starting IO.
	err := fd.pd.Prepare(int(o.mode))
	if err != nil {
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// Start IO.
	if canCancelIO {
		err = submit(o)
	} else {
		// Send the request to a special dedicated thread,
		// so it can stop the IO with CancelIo later.
		s.req <- ioSrvReq{o, submit}
		err = <-o.errc
	}
	switch err {
	case nil:
		// IO completed immediately.
		if o.fd.skipSyncNotif {
			// No completion message will follow, so return immediately.
			return int(o.qty), nil
		}
		// Need to get our completion message anyway.
	case syscall.ERROR_IO_PENDING:
		// IO started, and we have to wait for its completion.
		err = nil
	default:
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// Wait for our request to complete.
	err = fd.pd.Wait(int(o.mode))
	if err == nil {
		// All is good. Extract our IO results and return.
		if o.errno != 0 {
			err = syscall.Errno(o.errno)
			return 0, &OpError{name, fd.net, fd.laddr, err}
		}
		return int(o.qty), nil
	}
	// IO is interrupted by "close" or "timeout".
	netpollErr := err
	switch netpollErr {
	case errClosing, errTimeout:
		// will deal with those.
	default:
		panic("net: unexpected runtime.netpoll error: " + netpollErr.Error())
	}
	// Cancel our request.
	if canCancelIO {
		err := syscall.CancelIoEx(fd.sysfd, &o.o)
		// ERROR_NOT_FOUND means the IO has already completed.
		if err != nil && err != syscall.ERROR_NOT_FOUND {
			// TODO(brainman): maybe do something else, but panic.
			panic(err)
		}
	} else {
		s.req <- ioSrvReq{o, nil}
		<-o.errc
	}
	// Wait for cancellation to complete.
	fd.pd.WaitCanceled(int(o.mode))
	if o.errno != 0 {
		err = syscall.Errno(o.errno)
		if err == syscall.ERROR_OPERATION_ABORTED { // IO canceled
			err = netpollErr
		}
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// We issued a cancellation request, but the IO operation completed
	// before the cancellation ran. Treat the operation as succeeded
	// (the bytes were actually sent/received over the network).
	return int(o.qty), nil
}

// Start helper goroutines.
var rsrv, wsrv *ioSrv
var onceStartServer sync.Once
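
// startServer allocates the read and write IO servers and, when the
// CancelIoEx API is not available, starts their helper goroutines.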
func startServer() {
	rsrv = new(ioSrv)
	wsrv = new(ioSrv)
	if !canCancelIO {
		// Only the CancelIo API is available. Let's start two special
		// goroutines, each locked to an OS thread, that both start and
		// cancel IO. One will process read requests, while the other
		// will do writes.
		rsrv.req = make(chan ioSrvReq)
		go rsrv.ProcessRemoteIO()
		wsrv.req = make(chan ioSrvReq)
		go wsrv.ProcessRemoteIO()
	}
}

// Network file descriptor.
type netFD struct {
	// locking/lifetime of sysfd + serialize access to Read and Write methods
	fdmu fdMutex

	// immutable until Close
	sysfd         syscall.Handle
	family        int
	sotype        int
	isConnected   bool
	skipSyncNotif bool
	net           string
	laddr         Addr
	raddr         Addr

	rop operation // read operation
	wop operation // write operation

	// wait server
	pd pollDesc
}
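
// newFD wraps an existing socket handle in a netFD, making sure
// startServer has run exactly once before the first descriptor is used.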
func newFD(sysfd syscall.Handle, family, sotype int, net string) (*netFD, error) {
	if initErr != nil {
		return nil, initErr
	}
	onceStartServer.Do(startServer)
	return &netFD{sysfd: sysfd, family: family, sotype: sotype, net: net}, nil
}
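
// init registers fd with the runtime netpoller and applies the socket
// options (skipped completion notifications, disabled SIO_UDP_CONNRESET
// behavior) appropriate for fd.net.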
func (fd *netFD) init() error {
	if err := fd.pd.Init(fd); err != nil {
		return err
	}
	if hasLoadSetFileCompletionNotificationModes {
		// We do not use events, so we can skip them always.
		flags := uint8(syscall.FILE_SKIP_SET_EVENT_ON_HANDLE)
		// It's not safe to skip completion notifications for UDP:
		// http://blogs.technet.com/b/winserverperformance/archive/2008/06/26/designing-applications-for-high-performance-part-iii.aspx
		if skipSyncNotif && fd.net == "tcp" {
			flags |= syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS
		}
		err := syscall.SetFileCompletionNotificationModes(fd.sysfd, flags)
		if err == nil && flags&syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS != 0 {
			fd.skipSyncNotif = true
		}
	}
	// Disable SIO_UDP_CONNRESET behavior.
	// http://support.microsoft.com/kb/263823
	switch fd.net {
	case "udp", "udp4", "udp6":
		ret := uint32(0)
		flag := uint32(0)
		size := uint32(unsafe.Sizeof(flag))
		err := syscall.WSAIoctl(fd.sysfd, syscall.SIO_UDP_CONNRESET, (*byte)(unsafe.Pointer(&flag)), size, nil, 0, &ret, nil, 0)
		if err != nil {
			return os.NewSyscallError("WSAIoctl", err)
		}
	}
	fd.rop.mode = 'r'
	fd.wop.mode = 'w'
	fd.rop.fd = fd
	fd.wop.fd = fd
	fd.rop.runtimeCtx = fd.pd.runtimeCtx
	fd.wop.runtimeCtx = fd.pd.runtimeCtx
	if !canCancelIO {
		fd.rop.errc = make(chan error)
		fd.wop.errc = make(chan error)
	}
	return nil
}
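
// setAddr records the local and remote addresses and sets a finalizer
// so the socket is eventually closed even if the owner forgets to call
// Close.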
func (fd *netFD) setAddr(laddr, raddr Addr) {
	fd.laddr = laddr
	fd.raddr = raddr
	runtime.SetFinalizer(fd, (*netFD).Close)
}
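
// connect connects fd to the remote address ra. When ConnectEx is
// available, the socket is bound (if necessary) and connected
// asynchronously through ExecIO; otherwise it falls back to the plain
// connect call (connectFunc).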
func (fd *netFD) connect(la, ra syscall.Sockaddr, deadline time.Time) error {
	// There is no need to call fd.writeLock here,
	// because fd is not yet accessible to the user,
	// so no concurrent operations are possible.
	if err := fd.init(); err != nil {
		return err
	}
	if !deadline.IsZero() {
		fd.setWriteDeadline(deadline)
		defer fd.setWriteDeadline(noDeadline)
	}
	if !canUseConnectEx(fd.net) {
		return connectFunc(fd.sysfd, ra)
	}
	// The ConnectEx windows API requires an unconnected, previously bound socket.
	if la == nil {
		switch ra.(type) {
		case *syscall.SockaddrInet4:
			la = &syscall.SockaddrInet4{}
		case *syscall.SockaddrInet6:
			la = &syscall.SockaddrInet6{}
		default:
			panic("unexpected type in connect")
		}
		if err := syscall.Bind(fd.sysfd, la); err != nil {
			return err
		}
	}
	// Call ConnectEx API.
	o := &fd.wop
	o.sa = ra
	_, err := wsrv.ExecIO(o, "ConnectEx", func(o *operation) error {
		return connectExFunc(o.fd.sysfd, o.sa, nil, 0, nil, &o.o)
	})
	if err != nil {
		return err
	}
	// Refresh socket properties.
	return syscall.Setsockopt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_UPDATE_CONNECT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
}
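
// destroy closes the socket handle and detaches fd from the runtime
// poller. It runs once the last reference to fd has been dropped (see
// decref).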
|
|
|
|
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
func (fd *netFD) destroy() {
|
|
|
|
if fd.sysfd == syscall.InvalidHandle {
|
|
|
|
return
|
|
|
|
}
|
|
|
|
// Poller may want to unregister fd in readiness notification mechanism,
|
net: add socket system call hooks for testing
This change adds socket system call hooks to existing test cases for
simulating a bit complicated network conditions to help making timeout
and dual IP stack test cases work more properly in followup changes.
Also test cases print debugging information in non-short mode like the
following:
Leaked goroutines:
net.TestWriteTimeout.func2(0xc20802a5a0, 0xc20801d000, 0x1000, 0x1000, 0xc2081d2ae0)
/go/src/net/timeout_test.go:170 +0x98
created by net.TestWriteTimeout
/go/src/net/timeout_test.go:173 +0x745
net.runDatagramPacketConnServer(0xc2080730e0, 0x2bd270, 0x3, 0x2c1770, 0xb, 0xc2081d2ba0, 0xc2081d2c00)
/go/src/net/server_test.go:398 +0x667
created by net.TestTimeoutUDP
/go/src/net/timeout_test.go:247 +0xc9
(snip)
Leaked sockets:
3: {Cookie:615726511685632 Err:<nil> SocketErr:0}
5: {Cookie:7934075906097152 Err:<nil> SocketErr:0}
Socket statistical information:
{Family:1 Type:805306370 Protocol:0 Opened:17 Accepted:0 Connected:5 Closed:17}
{Family:2 Type:805306369 Protocol:0 Opened:450 Accepted:234 Connected:279 Closed:636}
{Family:1 Type:805306369 Protocol:0 Opened:11 Accepted:5 Connected:5 Closed:16}
{Family:28 Type:805306369 Protocol:0 Opened:95 Accepted:22 Connected:16 Closed:116}
{Family:2 Type:805306370 Protocol:0 Opened:84 Accepted:0 Connected:34 Closed:83}
{Family:28 Type:805306370 Protocol:0 Opened:52 Accepted:0 Connected:4 Closed:52}
Change-Id: I0e84be59a0699bc31245c78e2249423459b8cdda
Reviewed-on: https://go-review.googlesource.com/6390
Reviewed-by: Ian Lance Taylor <iant@golang.org>
2015-02-28 20:27:01 -07:00
|
|
|
	// so this must be executed before closeFunc.
	fd.pd.Close()
	closeFunc(fd.sysfd)
	fd.sysfd = syscall.InvalidHandle
	// no need for a finalizer anymore
	runtime.SetFinalizer(fd, nil)
}
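
// destroy drops the runtime finalizer once the handle has been closed
// explicitly, since there is nothing left for the finalizer to clean up.
// Below is a minimal, hypothetical sketch of the same pattern (set a
// finalizer as a safety net when a resource is created, clear it on an
// explicit Close); the type and function names are illustrative and not
// part of this package:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	type resource struct{ closed bool }
//
//	func open() *resource {
//		r := &resource{}
//		// Safety net: if the caller forgets Close, the GC runs it.
//		runtime.SetFinalizer(r, (*resource).Close)
//		return r
//	}
//
//	func (r *resource) Close() error {
//		if !r.closed {
//			r.closed = true
//			// Explicit Close makes the finalizer unnecessary.
//			runtime.SetFinalizer(r, nil)
//		}
//		return nil
//	}
//
//	func main() {
//		r := open()
//		defer r.Close()
//		fmt.Println("open:", !r.closed)
//	}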

// Add a reference to this fd.
// Returns an error if the fd cannot be used.
func (fd *netFD) incref() error {
	if !fd.fdmu.Incref() {
		return errClosing
	}
	return nil
}

// Remove a reference to this FD and close if we've been asked to do so
// (and there are no references left).
func (fd *netFD) decref() {
	if fd.fdmu.Decref() {
		fd.destroy()
	}
}

// Add a reference to this fd and lock for reading.
// Returns an error if the fd cannot be used.
func (fd *netFD) readLock() error {
	if !fd.fdmu.RWLock(true) {
		return errClosing
	}
	return nil
}

// Unlock for reading and remove a reference to this FD.
func (fd *netFD) readUnlock() {
	if fd.fdmu.RWUnlock(true) {
		fd.destroy()
	}
}

// Add a reference to this fd and lock for writing.
// Returns an error if the fd cannot be used.
func (fd *netFD) writeLock() error {
	if !fd.fdmu.RWLock(false) {
		return errClosing
	}
	return nil
}

// Unlock for writing and remove a reference to this FD.
func (fd *netFD) writeUnlock() {
	if fd.fdmu.RWUnlock(false) {
		fd.destroy()
	}
}
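
// The fdMutex embedded in netFD both reference-counts the descriptor and
// serializes Read and Write, so every operation brackets its work between
// one of the lock helpers above and the matching unlock, exactly as the
// Read and Write methods below do. A schematic of the pairing (someOp is a
// hypothetical method, not part of this file):
//
//	func (fd *netFD) someOp() error {
//		if err := fd.readLock(); err != nil {
//			return err // fd is already closing
//		}
//		// readUnlock may run fd.destroy() if Close was called meanwhile.
//		defer fd.readUnlock()
//		// ... perform the I/O while the reference is held ...
//		return nil
//	}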

func (fd *netFD) Close() error {
	if !fd.fdmu.IncrefAndClose() {
		return errClosing
	}
	// unblock pending reader and writer
	fd.pd.Evict()
	fd.decref()
	return nil
}
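
// Close marks the descriptor as closing, evicts any pending reader or
// writer from the poll descriptor, and drops its reference; the handle is
// destroyed only when the last reference is released. From a caller's
// point of view this is what lets Close unblock a Read pending on the same
// connection. A minimal sketch using only the public net API (addresses
// and timings are arbitrary):
//
//	package main
//
//	import (
//		"fmt"
//		"net"
//		"time"
//	)
//
//	func main() {
//		ln, err := net.Listen("tcp", "127.0.0.1:0")
//		if err != nil {
//			panic(err)
//		}
//		defer ln.Close()
//
//		go func() {
//			// Dial but stay silent so the accepted side blocks in Read.
//			if c, err := net.Dial("tcp", ln.Addr().String()); err == nil {
//				defer c.Close()
//				time.Sleep(2 * time.Second)
//			}
//		}()
//
//		c, err := ln.Accept()
//		if err != nil {
//			panic(err)
//		}
//		go func() {
//			time.Sleep(100 * time.Millisecond)
//			c.Close() // unblocks the pending Read below
//		}()
//		_, err = c.Read(make([]byte, 1))
//		fmt.Println("Read returned after Close:", err)
//	}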

func (fd *netFD) shutdown(how int) error {
	if err := fd.incref(); err != nil {
		return err
	}
	defer fd.decref()
	err := syscall.Shutdown(fd.sysfd, how)
	if err != nil {
		return &OpError{"shutdown", fd.net, fd.laddr, err}
	}
	return nil
}

func (fd *netFD) closeRead() error {
	return fd.shutdown(syscall.SHUT_RD)
}

func (fd *netFD) closeWrite() error {
	return fd.shutdown(syscall.SHUT_WR)
}
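
// closeRead and closeWrite back the exported CloseRead and CloseWrite
// methods of TCPConn and UnixConn, which half-close a connection. A
// minimal sketch of the usual CloseWrite pattern (signal end-of-stream to
// the peer, then keep reading the reply); addresses are arbitrary:
//
//	package main
//
//	import (
//		"fmt"
//		"io/ioutil"
//		"net"
//	)
//
//	func main() {
//		ln, err := net.Listen("tcp", "127.0.0.1:0")
//		if err != nil {
//			panic(err)
//		}
//		defer ln.Close()
//
//		go func() {
//			c, err := ln.Accept()
//			if err != nil {
//				return
//			}
//			defer c.Close()
//			// Read until the client half-closes, then echo it back.
//			b, _ := ioutil.ReadAll(c)
//			c.Write(b)
//		}()
//
//		c, err := net.Dial("tcp", ln.Addr().String())
//		if err != nil {
//			panic(err)
//		}
//		defer c.Close()
//		c.Write([]byte("ping"))
//		c.(*net.TCPConn).CloseWrite() // EOF for the server's ReadAll
//		reply, _ := ioutil.ReadAll(c)
//		fmt.Printf("%s\n", reply)
//	}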

func (fd *netFD) Read(buf []byte) (int, error) {
	if err := fd.readLock(); err != nil {
		return 0, err
	}
	defer fd.readUnlock()
	o := &fd.rop
	o.InitBuf(buf)
	n, err := rsrv.ExecIO(o, "WSARecv", func(o *operation) error {
		return syscall.WSARecv(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
	})
	if raceenabled {
		raceAcquire(unsafe.Pointer(&ioSync))
	}
	err = fd.eofError(n, err)
	return n, err
}

func (fd *netFD) readFrom(buf []byte) (n int, sa syscall.Sockaddr, err error) {
	if len(buf) == 0 {
		return 0, nil, nil
	}
	if err := fd.readLock(); err != nil {
		return 0, nil, err
	}
	defer fd.readUnlock()
	o := &fd.rop
	o.InitBuf(buf)
	n, err = rsrv.ExecIO(o, "WSARecvFrom", func(o *operation) error {
		if o.rsa == nil {
			o.rsa = new(syscall.RawSockaddrAny)
		}
		o.rsan = int32(unsafe.Sizeof(*o.rsa))
		return syscall.WSARecvFrom(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
	})
	err = fd.eofError(n, err)
	if err != nil {
		return 0, nil, err
	}
	sa, _ = o.rsa.Sockaddr()
	return
}

func (fd *netFD) Write(buf []byte) (int, error) {
	if err := fd.writeLock(); err != nil {
		return 0, err
	}
	defer fd.writeUnlock()
	if raceenabled {
		raceReleaseMerge(unsafe.Pointer(&ioSync))
	}
	o := &fd.wop
	o.InitBuf(buf)
	return wsrv.ExecIO(o, "WSASend", func(o *operation) error {
		return syscall.WSASend(o.fd.sysfd, &o.buf, 1, &o.qty, 0, &o.o, nil)
	})
}

func (fd *netFD) writeTo(buf []byte, sa syscall.Sockaddr) (int, error) {
	if len(buf) == 0 {
		return 0, nil
	}
	if err := fd.writeLock(); err != nil {
		return 0, err
	}
	defer fd.writeUnlock()
	o := &fd.wop
	o.InitBuf(buf)
	o.sa = sa
	return wsrv.ExecIO(o, "WSASendto", func(o *operation) error {
		return syscall.WSASendto(o.fd.sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil)
	})
}
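
// readFrom and writeTo are what the exported ReadFrom and WriteTo methods
// of packet-oriented connections (for example UDPConn) eventually reach on
// this platform. A minimal sketch of an unconnected UDP round trip over
// the loopback interface using the public API:
//
//	package main
//
//	import (
//		"fmt"
//		"net"
//	)
//
//	func main() {
//		a, err := net.ListenPacket("udp", "127.0.0.1:0")
//		if err != nil {
//			panic(err)
//		}
//		defer a.Close()
//		b, err := net.ListenPacket("udp", "127.0.0.1:0")
//		if err != nil {
//			panic(err)
//		}
//		defer b.Close()
//
//		if _, err := a.WriteTo([]byte("hello"), b.LocalAddr()); err != nil {
//			panic(err)
//		}
//		buf := make([]byte, 64)
//		n, from, err := b.ReadFrom(buf)
//		if err != nil {
//			panic(err)
//		}
//		fmt.Printf("got %q from %v\n", buf[:n], from)
//	}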

func (fd *netFD) acceptOne(rawsa []syscall.RawSockaddrAny, o *operation) (*netFD, error) {
	// Get new socket.
	s, err := sysSocket(fd.family, fd.sotype, 0)
	if err != nil {
		return nil, &OpError{"socket", fd.net, fd.laddr, err}
	}

	// Associate our new socket with IOCP.
	netfd, err := newFD(s, fd.family, fd.sotype, fd.net)
	if err != nil {
		closeFunc(s)
		return nil, &OpError{"accept", fd.net, fd.laddr, err}
	}
	if err := netfd.init(); err != nil {
		netfd.Close()
		return nil, err
	}

	// Submit accept request.
	o.handle = s
	o.rsan = int32(unsafe.Sizeof(rawsa[0]))
	_, err = rsrv.ExecIO(o, "AcceptEx", func(o *operation) error {
		return syscall.AcceptEx(o.fd.sysfd, o.handle, (*byte)(unsafe.Pointer(&rawsa[0])), 0, uint32(o.rsan), uint32(o.rsan), &o.qty, &o.o)
	})
	if err != nil {
		netfd.Close()
		return nil, err
	}

	// Inherit properties of the listening socket.
	err = syscall.Setsockopt(s, syscall.SOL_SOCKET, syscall.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
	if err != nil {
		netfd.Close()
		return nil, &OpError{"Setsockopt", fd.net, fd.laddr, err}
	}

	return netfd, nil
}

func (fd *netFD) accept() (*netFD, error) {
	if err := fd.readLock(); err != nil {
		return nil, err
	}
	defer fd.readUnlock()

	o := &fd.rop
	var netfd *netFD
	var err error
	var rawsa [2]syscall.RawSockaddrAny
	for {
		netfd, err = fd.acceptOne(rawsa[:], o)
		if err == nil {
			break
		}
		// Sometimes we see WSAECONNRESET and ERROR_NETNAME_DELETED
		// returned here. These happen if a connection reset is received
		// before AcceptEx could complete. These errors relate to the new
		// connection, not to AcceptEx, so ignore the broken connection
		// and try AcceptEx again for more connections.
		operr, ok := err.(*OpError)
		if !ok {
			return nil, err
		}
		errno, ok := operr.Err.(syscall.Errno)
		if !ok {
			return nil, err
		}
		switch errno {
		case syscall.ERROR_NETNAME_DELETED, syscall.WSAECONNRESET:
			// ignore these and try again
		default:
			return nil, err
		}
	}

	// Get local and peer addr out of AcceptEx buffer.
	var lrsa, rrsa *syscall.RawSockaddrAny
	var llen, rlen int32
	syscall.GetAcceptExSockaddrs((*byte)(unsafe.Pointer(&rawsa[0])),
		0, uint32(o.rsan), uint32(o.rsan), &lrsa, &llen, &rrsa, &rlen)
	lsa, _ := lrsa.Sockaddr()
	rsa, _ := rrsa.Sockaddr()

	netfd.setAddr(netfd.addrFunc()(lsa), netfd.addrFunc()(rsa))
	return netfd, nil
}
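
// accept retries AcceptEx when the pending connection was reset before it
// could be accepted, so those resets never surface to callers of
// Listener.Accept. A minimal sketch of a caller-side accept loop using the
// public API; the Temporary check is a common defensive pattern and is not
// something this file requires:
//
//	package main
//
//	import (
//		"log"
//		"net"
//	)
//
//	func main() {
//		ln, err := net.Listen("tcp", "127.0.0.1:0")
//		if err != nil {
//			log.Fatal(err)
//		}
//		defer ln.Close()
//		log.Println("listening on", ln.Addr())
//
//		for {
//			c, err := ln.Accept()
//			if err != nil {
//				if ne, ok := err.(net.Error); ok && ne.Temporary() {
//					log.Println("temporary accept error:", err)
//					continue
//				}
//				log.Fatal(err)
//			}
//			go func(c net.Conn) {
//				defer c.Close()
//				c.Write([]byte("hello\n"))
//			}(c)
//		}
//	}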

// Unimplemented functions.

func (fd *netFD) dup() (*os.File, error) {
	// TODO: Implement this
	return nil, os.NewSyscallError("dup", syscall.EWINDOWS)
}

var errNoSupport = errors.New("address family not supported")

func (fd *netFD) readMsg(p []byte, oob []byte) (n, oobn, flags int, sa syscall.Sockaddr, err error) {
	return 0, 0, 0, nil, errNoSupport
}

func (fd *netFD) writeMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
	return 0, 0, errNoSupport
}
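
// readMsg and writeMsg are stubs here, so the exported ReadMsgUDP and
// WriteMsgUDP report errNoSupport in this version of the package. A
// minimal sketch that surfaces the error (behavior assumed only for the
// platform this file targets; elsewhere ReadMsgUDP would block waiting
// for a packet):
//
//	package main
//
//	import (
//		"fmt"
//		"net"
//	)
//
//	func main() {
//		c, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
//		if err != nil {
//			panic(err)
//		}
//		defer c.Close()
//
//		buf := make([]byte, 64)
//		oob := make([]byte, 64)
//		_, _, _, _, err = c.ReadMsgUDP(buf, oob)
//		fmt.Println("ReadMsgUDP:", err)
//	}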