2010-06-29 21:23:39 -06:00
|
|
|
// Copyright 2010 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package net
|
|
|
|
|
|
|
|
import (
|
2012-02-13 22:57:57 -07:00
|
|
|
"errors"
|
2011-11-01 20:05:34 -06:00
|
|
|
"io"
|
2010-06-29 21:23:39 -06:00
|
|
|
"os"
|
2011-01-19 12:49:25 -07:00
|
|
|
"runtime"
|
2010-06-29 21:23:39 -06:00
|
|
|
"sync"
|
|
|
|
"syscall"
|
2011-01-19 12:49:25 -07:00
|
|
|
"time"
|
2010-06-29 21:23:39 -06:00
|
|
|
"unsafe"
|
|
|
|
)
|
|
|
|
|
2013-08-19 13:09:24 -06:00
|
|
|
var (
	// initErr records any failure from WSAStartup in sysInit;
	// presumably consulted before network operations elsewhere in
	// the package — not visible in this chunk.
	initErr error
	// ioSync is a package-level counter; its use is not visible in
	// this chunk (NOTE(review): likely a sync target for runtime
	// netpoll — confirm against the rest of the file).
	ioSync uint64
)
|
2011-02-22 20:40:24 -07:00
|
|
|
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// The CancelIo Windows API cancels all outstanding IO for a particular
// socket on the current thread. To overcome that limitation, we run a
// special goroutine, locked to a single OS thread, that both starts
// and cancels IO. It means there are 2 unavoidable thread switches
// for every IO.
// Some newer versions of Windows have a CancelIoEx API that does
// not have that limitation and can be used from any thread. This
// package uses the CancelIoEx API, if present, otherwise it falls back
// to CancelIo.

var (
	canCancelIO bool // determines if CancelIoEx API is present
	// skipSyncNotif reports whether synchronously-completed IO can
	// skip the completion-port notification; set in sysInit only when
	// all TCP providers are IFS handles.
	skipSyncNotif bool
	// hasLoadSetFileCompletionNotificationModes records whether the
	// SetFileCompletionNotificationModes API is available.
	hasLoadSetFileCompletionNotificationModes bool
)
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
|
2012-11-06 22:58:20 -07:00
|
|
|
func sysInit() {
|
2011-02-22 20:40:24 -07:00
|
|
|
var d syscall.WSAData
|
2011-09-08 00:32:40 -06:00
|
|
|
e := syscall.WSAStartup(uint32(0x202), &d)
|
2011-12-07 18:07:21 -07:00
|
|
|
if e != nil {
|
|
|
|
initErr = os.NewSyscallError("WSAStartup", e)
|
2011-02-22 20:40:24 -07:00
|
|
|
}
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
canCancelIO = syscall.LoadCancelIoEx() == nil
|
2012-11-06 22:58:20 -07:00
|
|
|
if syscall.LoadGetAddrInfo() == nil {
|
2013-01-17 23:05:04 -07:00
|
|
|
lookupPort = newLookupPort
|
2012-11-06 22:58:20 -07:00
|
|
|
lookupIP = newLookupIP
|
|
|
|
}
|
2013-08-08 07:36:43 -06:00
|
|
|
|
|
|
|
hasLoadSetFileCompletionNotificationModes = syscall.LoadSetFileCompletionNotificationModes() == nil
|
|
|
|
if hasLoadSetFileCompletionNotificationModes {
|
|
|
|
// It's not safe to use FILE_SKIP_COMPLETION_PORT_ON_SUCCESS if non IFS providers are installed:
|
|
|
|
// http://support.microsoft.com/kb/2568167
|
|
|
|
skipSyncNotif = true
|
|
|
|
protos := [2]int32{syscall.IPPROTO_TCP, 0}
|
|
|
|
var buf [32]syscall.WSAProtocolInfo
|
|
|
|
len := uint32(unsafe.Sizeof(buf))
|
|
|
|
n, err := syscall.WSAEnumProtocols(&protos[0], &buf[0], &len)
|
|
|
|
if err != nil {
|
|
|
|
skipSyncNotif = false
|
|
|
|
} else {
|
|
|
|
for i := int32(0); i < n; i++ {
|
|
|
|
if buf[i].ServiceFlags1&syscall.XP1_IFS_HANDLES == 0 {
|
|
|
|
skipSyncNotif = false
|
|
|
|
break
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2011-02-22 20:40:24 -07:00
|
|
|
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
// closesocket closes the socket handle s via the Winsock
// closesocket call, returning any error from the system call.
func closesocket(s syscall.Handle) error {
	return syscall.Closesocket(s)
}
|
|
|
|
|
2013-01-10 18:42:09 -07:00
|
|
|
func canUseConnectEx(net string) bool {
|
2013-08-23 04:31:24 -06:00
|
|
|
switch net {
|
|
|
|
case "udp", "udp4", "udp6", "ip", "ip4", "ip6":
|
2013-01-10 18:42:09 -07:00
|
|
|
// ConnectEx windows API does not support connectionless sockets.
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return syscall.LoadConnectEx() == nil
|
|
|
|
}
|
|
|
|
|
2013-04-02 14:24:16 -06:00
|
|
|
func resolveAndDial(net, addr string, localAddr Addr, deadline time.Time) (Conn, error) {
|
2013-01-10 18:42:09 -07:00
|
|
|
if !canUseConnectEx(net) {
|
|
|
|
// Use the relatively inefficient goroutine-racing
|
|
|
|
// implementation of DialTimeout.
|
2013-04-02 14:24:16 -06:00
|
|
|
return resolveAndDialChannel(net, addr, localAddr, deadline)
|
2013-01-10 18:42:09 -07:00
|
|
|
}
|
2013-02-08 05:53:10 -07:00
|
|
|
ra, err := resolveAddr("dial", net, addr, deadline)
|
2013-01-10 18:42:09 -07:00
|
|
|
if err != nil {
|
2013-08-13 16:04:39 -06:00
|
|
|
return nil, &OpError{Op: "dial", Net: net, Addr: nil, Err: err}
|
2013-01-10 18:42:09 -07:00
|
|
|
}
|
2013-08-29 18:09:45 -06:00
|
|
|
return dial(net, addr, localAddr, ra.toAddr(), deadline)
|
2013-01-10 18:42:09 -07:00
|
|
|
}
|
|
|
|
|
2013-08-06 04:40:10 -06:00
|
|
|
// operation contains superset of data necessary to perform all async IO.
type operation struct {
	// Used by IOCP interface, it must be first field
	// of the struct, as our code rely on it.
	o syscall.Overlapped

	// fields used by runtime.netpoll
	runtimeCtx uintptr
	mode       int32  // IO mode, passed to pd.Prepare/Wait as int
	errno      int32  // system error code set on completion; 0 on success
	qty        uint32 // number of bytes transferred, reported on completion

	// fields used only by net package
	fd   *netFD
	errc chan error // carries submit/cancel results from the ioSrv thread
	buf  syscall.WSABuf // populated by InitBuf from a caller's []byte
	sa   syscall.Sockaddr
	rsa  *syscall.RawSockaddrAny
	rsan int32
	handle syscall.Handle
	flags  uint32
}
|
2010-06-29 21:23:39 -06:00
|
|
|
|
2013-08-06 04:40:10 -06:00
|
|
|
func (o *operation) InitBuf(buf []byte) {
|
2011-02-22 20:40:24 -07:00
|
|
|
o.buf.Len = uint32(len(buf))
|
2013-08-06 04:40:10 -06:00
|
|
|
o.buf.Buf = nil
|
|
|
|
if len(buf) != 0 {
|
2011-02-22 20:40:24 -07:00
|
|
|
o.buf.Buf = (*byte)(unsafe.Pointer(&buf[0]))
|
|
|
|
}
|
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// ioSrv executes net IO requests.
type ioSrv struct {
	// req delivers submit/cancel requests to the dedicated IO
	// goroutine (ProcessRemoteIO); used only when CancelIoEx is
	// unavailable.
	req chan ioSrvReq
}
|
|
|
|
|
|
|
|
// ioSrvReq is a single request sent to an ioSrv's dedicated goroutine.
type ioSrvReq struct {
	o      *operation
	submit func(o *operation) error // if nil, cancel the operation
}
|
|
|
|
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// ProcessRemoteIO will execute submit IO requests on behalf
|
2011-02-22 20:40:24 -07:00
|
|
|
// of other goroutines, all on a single os thread, so it can
|
|
|
|
// cancel them later. Results of all operations will be sent
|
|
|
|
// back to their requesters via channel supplied in request.
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// It is used only when the CancelIoEx API is unavailable.
|
2011-02-22 20:40:24 -07:00
|
|
|
func (s *ioSrv) ProcessRemoteIO() {
|
|
|
|
runtime.LockOSThread()
|
|
|
|
defer runtime.UnlockOSThread()
|
2013-08-06 04:40:10 -06:00
|
|
|
for r := range s.req {
|
|
|
|
if r.submit != nil {
|
|
|
|
r.o.errc <- r.submit(r.o)
|
|
|
|
} else {
|
|
|
|
r.o.errc <- syscall.CancelIo(r.o.fd.sysfd)
|
2011-02-22 20:40:24 -07:00
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-06 04:40:10 -06:00
|
|
|
// ExecIO executes a single IO operation o. It submits and cancels
// IO in the current thread for systems where the Windows CancelIoEx
// API is available. Alternatively, it passes the request onto
// runtime netpoll and waits for completion or cancels the request.
// It returns the number of bytes transferred on success, or a
// wrapped *OpError on failure.
func (s *ioSrv) ExecIO(o *operation, name string, submit func(o *operation) error) (int, error) {
	fd := o.fd
	// Notify runtime netpoll about starting IO.
	err := fd.pd.Prepare(int(o.mode))
	if err != nil {
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// Start IO.
	if canCancelIO {
		// CancelIoEx is available, so IO can be submitted from this
		// thread and canceled later from any thread.
		err = submit(o)
	} else {
		// Send request to a special dedicated thread,
		// so it can stop the IO with CancelIo later.
		s.req <- ioSrvReq{o, submit}
		err = <-o.errc
	}
	switch err {
	case nil:
		// IO completed immediately.
		if o.fd.skipSyncNotif {
			// No completion message will follow, so return immediately.
			return int(o.qty), nil
		}
		// Need to get our completion message anyway.
	case syscall.ERROR_IO_PENDING:
		// IO started, and we have to wait for its completion.
		err = nil
	default:
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// Wait for our request to complete.
	err = fd.pd.Wait(int(o.mode))
	if err == nil {
		// All is good. Extract our IO results and return.
		if o.errno != 0 {
			err = syscall.Errno(o.errno)
			return 0, &OpError{name, fd.net, fd.laddr, err}
		}
		return int(o.qty), nil
	}
	// IO was interrupted by "close" or "timeout".
	netpollErr := err
	switch netpollErr {
	case errClosing, errTimeout:
		// Expected interruptions; handled below by canceling the IO.
	default:
		panic("net: unexpected runtime.netpoll error: " + netpollErr.Error())
	}
	// Cancel our request.
	if canCancelIO {
		err := syscall.CancelIoEx(fd.sysfd, &o.o)
		// Assuming ERROR_NOT_FOUND is returned, if IO is completed.
		if err != nil && err != syscall.ERROR_NOT_FOUND {
			// TODO(brainman): maybe do something else, but panic.
			panic(err)
		}
	} else {
		// A nil submit function tells the dedicated thread to cancel.
		s.req <- ioSrvReq{o, nil}
		<-o.errc
	}
	// Wait for cancellation to complete.
	fd.pd.WaitCanceled(int(o.mode))
	if o.errno != 0 {
		err = syscall.Errno(o.errno)
		if err == syscall.ERROR_OPERATION_ABORTED { // IO Canceled
			// Report the original interruption reason, not the
			// cancellation side effect.
			err = netpollErr
		}
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// We issued a cancellation request, but the IO operation succeeded
	// before the cancellation request ran. We need to treat the IO
	// operation as succeeded (the bytes were actually sent/received
	// from the network).
	return int(o.qty), nil
}
|
|
|
|
|
2011-02-22 20:40:24 -07:00
|
|
|
// Start helper goroutines.
// rsrv serves read requests and wsrv serves write requests; both are
// created in startServer, guarded by onceStartServer.
var rsrv, wsrv *ioSrv
var onceStartServer sync.Once
|
2010-06-29 21:23:39 -06:00
|
|
|
|
|
|
|
func startServer() {
|
2013-08-26 22:53:57 -06:00
|
|
|
rsrv = new(ioSrv)
|
|
|
|
wsrv = new(ioSrv)
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
if !canCancelIO {
|
2013-08-26 22:53:57 -06:00
|
|
|
// Only CancelIo API is available. Lets start two special goroutines
|
|
|
|
// locked to an OS thread, that both starts and cancels IO. One will
|
|
|
|
// process read requests, while other will do writes.
|
|
|
|
rsrv.req = make(chan ioSrvReq)
|
|
|
|
go rsrv.ProcessRemoteIO()
|
|
|
|
wsrv.req = make(chan ioSrvReq)
|
|
|
|
go wsrv.ProcessRemoteIO()
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2011-02-22 20:40:24 -07:00
|
|
|
// Network file descriptor.
type netFD struct {
	// locking/lifetime of sysfd + serialize access to Read and Write methods
	fdmu fdMutex

	// immutable until Close
	sysfd         syscall.Handle // the underlying Windows socket handle
	family        int            // address family, e.g. syscall.AF_INET
	sotype        int            // socket type, e.g. syscall.SOCK_STREAM
	isConnected   bool           // whether a connection has been established
	skipSyncNotif bool           // set in init when synchronous-completion IOCP notifications can be skipped
	net           string         // network name, e.g. "tcp", "udp"
	laddr         Addr           // local address
	raddr         Addr           // remote address
	rop           operation      // read operation
	wop           operation      // write operation

	// wait server
	pd pollDesc
}
|
|
|
|
|
2013-08-06 04:40:10 -06:00
|
|
|
func newFD(sysfd syscall.Handle, family, sotype int, net string) (*netFD, error) {
|
2013-07-21 20:49:57 -06:00
|
|
|
if initErr != nil {
|
|
|
|
return nil, initErr
|
|
|
|
}
|
|
|
|
onceStartServer.Do(startServer)
|
2013-08-06 08:42:33 -06:00
|
|
|
return &netFD{sysfd: sysfd, family: family, sotype: sotype, net: net}, nil
|
|
|
|
}
|
|
|
|
|
|
|
|
func (fd *netFD) init() error {
|
2013-08-06 04:40:10 -06:00
|
|
|
if err := fd.pd.Init(fd); err != nil {
|
2013-08-06 08:42:33 -06:00
|
|
|
return err
|
2011-03-28 21:40:01 -06:00
|
|
|
}
|
2013-08-08 07:36:43 -06:00
|
|
|
if hasLoadSetFileCompletionNotificationModes {
|
|
|
|
// We do not use events, so we can skip them always.
|
|
|
|
flags := uint8(syscall.FILE_SKIP_SET_EVENT_ON_HANDLE)
|
|
|
|
// It's not safe to skip completion notifications for UDP:
|
|
|
|
// http://blogs.technet.com/b/winserverperformance/archive/2008/06/26/designing-applications-for-high-performance-part-iii.aspx
|
|
|
|
if skipSyncNotif && fd.net == "tcp" {
|
|
|
|
flags |= syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS
|
|
|
|
}
|
|
|
|
err := syscall.SetFileCompletionNotificationModes(fd.sysfd, flags)
|
|
|
|
if err == nil && flags&syscall.FILE_SKIP_COMPLETION_PORT_ON_SUCCESS != 0 {
|
|
|
|
fd.skipSyncNotif = true
|
|
|
|
}
|
|
|
|
}
|
2013-08-06 04:40:10 -06:00
|
|
|
fd.rop.mode = 'r'
|
|
|
|
fd.wop.mode = 'w'
|
|
|
|
fd.rop.fd = fd
|
|
|
|
fd.wop.fd = fd
|
|
|
|
fd.rop.runtimeCtx = fd.pd.runtimeCtx
|
|
|
|
fd.wop.runtimeCtx = fd.pd.runtimeCtx
|
|
|
|
if !canCancelIO {
|
|
|
|
fd.rop.errc = make(chan error)
|
2013-08-06 21:36:41 -06:00
|
|
|
fd.wop.errc = make(chan error)
|
2013-08-06 04:40:10 -06:00
|
|
|
}
|
2013-08-06 08:42:33 -06:00
|
|
|
return nil
|
2011-03-28 21:40:01 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
func (fd *netFD) setAddr(laddr, raddr Addr) {
|
|
|
|
fd.laddr = laddr
|
|
|
|
fd.raddr = raddr
|
2013-07-29 10:01:13 -06:00
|
|
|
runtime.SetFinalizer(fd, (*netFD).Close)
|
2011-03-28 21:40:01 -06:00
|
|
|
}
|
|
|
|
|
2013-04-30 18:47:39 -06:00
|
|
|
// connect establishes a connection to ra, binding to la first when needed.
// It uses the overlapped ConnectEx API when available for this network,
// falling back to a blocking syscall.Connect otherwise.
func (fd *netFD) connect(la, ra syscall.Sockaddr) error {
	// Do not need to call fd.writeLock here,
	// because fd is not yet accessible to user,
	// so no concurrent operations are possible.
	if !canUseConnectEx(fd.net) {
		return syscall.Connect(fd.sysfd, ra)
	}
	// ConnectEx windows API requires an unconnected, previously bound socket.
	if la == nil {
		// Synthesize a wildcard local address of the same family as ra.
		switch ra.(type) {
		case *syscall.SockaddrInet4:
			la = &syscall.SockaddrInet4{}
		case *syscall.SockaddrInet6:
			la = &syscall.SockaddrInet6{}
		default:
			panic("unexpected type in connect")
		}
		if err := syscall.Bind(fd.sysfd, la); err != nil {
			return err
		}
	}
	// Call ConnectEx API.
	o := &fd.wop
	o.sa = ra
	_, err := wsrv.ExecIO(o, "ConnectEx", func(o *operation) error {
		return syscall.ConnectEx(o.fd.sysfd, o.sa, nil, 0, nil, &o.o)
	})
	if err != nil {
		return err
	}
	// Refresh socket properties.
	return syscall.Setsockopt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_UPDATE_CONNECT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
}
|
|
|
|
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
// destroy releases the socket once the last reference is gone. It must only
// be called after fdmu reports no remaining users (see decref/readUnlock/
// writeUnlock). Safe to call on an already-destroyed fd.
func (fd *netFD) destroy() {
	if fd.sysfd == syscall.InvalidHandle {
		return
	}
	// Poller may want to unregister fd in readiness notification mechanism,
	// so this must be executed before closesocket.
	fd.pd.Close()
	closesocket(fd.sysfd)
	fd.sysfd = syscall.InvalidHandle
	// no need for a finalizer anymore
	runtime.SetFinalizer(fd, nil)
}
|
|
|
|
|
2010-06-29 21:23:39 -06:00
|
|
|
// Add a reference to this fd.
|
2012-02-13 22:40:37 -07:00
|
|
|
// Returns an error if the fd cannot be used.
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
func (fd *netFD) incref() error {
|
|
|
|
if !fd.fdmu.Incref() {
|
2012-02-13 22:40:37 -07:00
|
|
|
return errClosing
|
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Remove a reference to this FD and close if we've been asked to do so
|
|
|
|
// (and there are no references left).
|
|
|
|
func (fd *netFD) decref() {
|
|
|
|
if fd.fdmu.Decref() {
|
|
|
|
fd.destroy()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a reference to this fd and lock for reading.
|
|
|
|
// Returns an error if the fd cannot be used.
|
|
|
|
func (fd *netFD) readLock() error {
|
|
|
|
if !fd.fdmu.RWLock(true) {
|
2012-02-13 22:40:37 -07:00
|
|
|
return errClosing
|
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// Unlock for reading and remove a reference to this FD.
|
|
|
|
func (fd *netFD) readUnlock() {
|
|
|
|
if fd.fdmu.RWUnlock(true) {
|
|
|
|
fd.destroy()
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// Add a reference to this fd and lock for writing.
|
|
|
|
// Returns an error if the fd cannot be used.
|
|
|
|
func (fd *netFD) writeLock() error {
|
|
|
|
if !fd.fdmu.RWLock(false) {
|
|
|
|
return errClosing
|
2012-02-13 22:40:37 -07:00
|
|
|
}
|
|
|
|
return nil
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
// Unlock for writing and remove a reference to this FD.
|
|
|
|
func (fd *netFD) writeUnlock() {
|
|
|
|
if fd.fdmu.RWUnlock(false) {
|
|
|
|
fd.destroy()
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
func (fd *netFD) Close() error {
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
if !fd.fdmu.IncrefAndClose() {
|
|
|
|
return errClosing
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// unblock pending reader and writer
|
2013-07-21 20:49:57 -06:00
|
|
|
fd.pd.Evict()
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
fd.decref()
|
2010-06-29 21:23:39 -06:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
func (fd *netFD) shutdown(how int) error {
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
if err := fd.incref(); err != nil {
|
2012-11-02 03:46:47 -06:00
|
|
|
return err
|
2011-09-28 09:12:38 -06:00
|
|
|
}
|
2012-11-02 03:46:47 -06:00
|
|
|
defer fd.decref()
|
2011-11-13 20:44:52 -07:00
|
|
|
err := syscall.Shutdown(fd.sysfd, how)
|
|
|
|
if err != nil {
|
|
|
|
return &OpError{"shutdown", fd.net, fd.laddr, err}
|
2011-10-12 11:45:25 -06:00
|
|
|
}
|
2011-09-28 09:12:38 -06:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
// CloseRead shuts down the reading side of the connection.
// Most callers should just use Close.
func (fd *netFD) CloseRead() error {
	return fd.shutdown(syscall.SHUT_RD)
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
// CloseWrite shuts down the writing side of the connection.
// Most callers should just use Close.
func (fd *netFD) CloseWrite() error {
	return fd.shutdown(syscall.SHUT_WR)
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
func (fd *netFD) Read(buf []byte) (int, error) {
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
if err := fd.readLock(); err != nil {
|
2012-02-13 22:40:37 -07:00
|
|
|
return 0, err
|
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
defer fd.readUnlock()
|
2013-08-06 04:40:10 -06:00
|
|
|
o := &fd.rop
|
|
|
|
o.InitBuf(buf)
|
2013-08-26 22:53:57 -06:00
|
|
|
n, err := rsrv.ExecIO(o, "WSARecv", func(o *operation) error {
|
2013-08-06 04:40:10 -06:00
|
|
|
return syscall.WSARecv(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
|
|
|
|
})
|
2010-07-21 00:51:07 -06:00
|
|
|
if err == nil && n == 0 {
|
2011-11-01 20:05:34 -06:00
|
|
|
err = io.EOF
|
2010-07-21 00:51:07 -06:00
|
|
|
}
|
2013-08-19 13:09:24 -06:00
|
|
|
if raceenabled {
|
|
|
|
raceAcquire(unsafe.Pointer(&ioSync))
|
|
|
|
}
|
2012-01-31 08:36:45 -07:00
|
|
|
return n, err
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
func (fd *netFD) ReadFrom(buf []byte) (n int, sa syscall.Sockaddr, err error) {
|
2011-02-22 20:40:24 -07:00
|
|
|
if len(buf) == 0 {
|
2010-11-22 09:01:30 -07:00
|
|
|
return 0, nil, nil
|
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
if err := fd.readLock(); err != nil {
|
2012-02-13 22:40:37 -07:00
|
|
|
return 0, nil, err
|
2010-11-22 09:01:30 -07:00
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
defer fd.readUnlock()
|
2013-08-06 04:40:10 -06:00
|
|
|
o := &fd.rop
|
|
|
|
o.InitBuf(buf)
|
2013-08-26 22:53:57 -06:00
|
|
|
n, err = rsrv.ExecIO(o, "WSARecvFrom", func(o *operation) error {
|
2013-08-06 04:40:10 -06:00
|
|
|
if o.rsa == nil {
|
|
|
|
o.rsa = new(syscall.RawSockaddrAny)
|
|
|
|
}
|
|
|
|
o.rsan = int32(unsafe.Sizeof(*o.rsa))
|
|
|
|
return syscall.WSARecvFrom(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
|
|
|
|
})
|
2011-07-25 19:55:52 -06:00
|
|
|
if err != nil {
|
|
|
|
return 0, nil, err
|
|
|
|
}
|
2011-02-22 20:40:24 -07:00
|
|
|
sa, _ = o.rsa.Sockaddr()
|
2010-11-22 09:01:30 -07:00
|
|
|
return
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
func (fd *netFD) Write(buf []byte) (int, error) {
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
if err := fd.writeLock(); err != nil {
|
2012-02-13 22:40:37 -07:00
|
|
|
return 0, err
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
defer fd.writeUnlock()
|
2013-08-19 13:09:24 -06:00
|
|
|
if raceenabled {
|
|
|
|
raceReleaseMerge(unsafe.Pointer(&ioSync))
|
|
|
|
}
|
2013-08-06 04:40:10 -06:00
|
|
|
o := &fd.wop
|
|
|
|
o.InitBuf(buf)
|
2013-08-26 22:53:57 -06:00
|
|
|
return wsrv.ExecIO(o, "WSASend", func(o *operation) error {
|
2013-08-06 04:40:10 -06:00
|
|
|
return syscall.WSASend(o.fd.sysfd, &o.buf, 1, &o.qty, 0, &o.o, nil)
|
|
|
|
})
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
func (fd *netFD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) {
|
2011-02-22 20:40:24 -07:00
|
|
|
if len(buf) == 0 {
|
2010-11-22 09:01:30 -07:00
|
|
|
return 0, nil
|
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
if err := fd.writeLock(); err != nil {
|
2012-02-13 22:40:37 -07:00
|
|
|
return 0, err
|
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
defer fd.writeUnlock()
|
2013-08-06 04:40:10 -06:00
|
|
|
o := &fd.wop
|
|
|
|
o.InitBuf(buf)
|
2011-02-22 20:40:24 -07:00
|
|
|
o.sa = sa
|
2013-08-26 22:53:57 -06:00
|
|
|
return wsrv.ExecIO(o, "WSASendto", func(o *operation) error {
|
2013-08-06 04:40:10 -06:00
|
|
|
return syscall.WSASendto(o.fd.sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil)
|
|
|
|
})
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (*netFD, error) {
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
if err := fd.readLock(); err != nil {
|
2012-02-13 22:57:57 -07:00
|
|
|
return nil, err
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
net: add special netFD mutex
The mutex, fdMutex, handles locking and lifetime of sysfd,
and serializes Read and Write methods.
This allows to strip 2 sync.Mutex.Lock calls,
2 sync.Mutex.Unlock calls, 1 defer and some amount
of misc overhead from every network operation.
On linux/amd64, Intel E5-2690:
benchmark old ns/op new ns/op delta
BenchmarkTCP4Persistent 9595 9454 -1.47%
BenchmarkTCP4Persistent-2 8978 8772 -2.29%
BenchmarkTCP4ConcurrentReadWrite 4900 4625 -5.61%
BenchmarkTCP4ConcurrentReadWrite-2 2603 2500 -3.96%
In general it strips 70-500 ns from every network operation depending
on processor model. On my relatively new E5-2690 it accounts to ~5%
of network op cost.
Fixes #6074.
R=golang-dev, bradfitz, alex.brainman, iant, mikioh.mikioh
CC=golang-dev
https://golang.org/cl/12418043
2013-08-09 11:43:00 -06:00
|
|
|
defer fd.readUnlock()
|
2010-06-29 21:23:39 -06:00
|
|
|
|
|
|
|
// Get new socket.
|
2013-02-03 22:03:41 -07:00
|
|
|
s, err := sysSocket(fd.family, fd.sotype, 0)
|
2012-01-31 08:36:45 -07:00
|
|
|
if err != nil {
|
2012-12-04 21:13:03 -07:00
|
|
|
return nil, &OpError{"socket", fd.net, fd.laddr, err}
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
// Associate our new socket with IOCP.
|
2013-07-21 20:49:57 -06:00
|
|
|
netfd, err := newFD(s, fd.family, fd.sotype, fd.net)
|
|
|
|
if err != nil {
|
2012-12-04 21:13:03 -07:00
|
|
|
closesocket(s)
|
2013-07-21 20:49:57 -06:00
|
|
|
return nil, &OpError{"accept", fd.net, fd.laddr, err}
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
2013-08-06 08:42:33 -06:00
|
|
|
if err := netfd.init(); err != nil {
|
|
|
|
fd.Close()
|
|
|
|
return nil, err
|
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
|
|
|
|
// Submit accept request.
|
2013-08-06 04:40:10 -06:00
|
|
|
o := &fd.rop
|
|
|
|
o.handle = s
|
|
|
|
var rawsa [2]syscall.RawSockaddrAny
|
|
|
|
o.rsan = int32(unsafe.Sizeof(rawsa[0]))
|
2013-08-26 22:53:57 -06:00
|
|
|
_, err = rsrv.ExecIO(o, "AcceptEx", func(o *operation) error {
|
2013-08-06 04:40:10 -06:00
|
|
|
return syscall.AcceptEx(o.fd.sysfd, o.handle, (*byte)(unsafe.Pointer(&rawsa[0])), 0, uint32(o.rsan), uint32(o.rsan), &o.qty, &o.o)
|
|
|
|
})
|
2011-02-22 20:40:24 -07:00
|
|
|
if err != nil {
|
2013-07-29 10:01:13 -06:00
|
|
|
netfd.Close()
|
2011-02-22 20:40:24 -07:00
|
|
|
return nil, err
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
// Inherit properties of the listening socket.
|
2012-01-31 08:36:45 -07:00
|
|
|
err = syscall.Setsockopt(s, syscall.SOL_SOCKET, syscall.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
|
|
|
|
if err != nil {
|
2013-07-29 10:01:13 -06:00
|
|
|
netfd.Close()
|
2012-12-04 21:13:03 -07:00
|
|
|
return nil, &OpError{"Setsockopt", fd.net, fd.laddr, err}
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
// Get local and peer addr out of AcceptEx buffer.
|
2011-02-22 20:40:24 -07:00
|
|
|
var lrsa, rrsa *syscall.RawSockaddrAny
|
|
|
|
var llen, rlen int32
|
2013-08-06 04:40:10 -06:00
|
|
|
syscall.GetAcceptExSockaddrs((*byte)(unsafe.Pointer(&rawsa[0])),
|
|
|
|
0, uint32(o.rsan), uint32(o.rsan), &lrsa, &llen, &rrsa, &rlen)
|
2011-02-22 20:40:24 -07:00
|
|
|
lsa, _ := lrsa.Sockaddr()
|
|
|
|
rsa, _ := rrsa.Sockaddr()
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
netfd.setAddr(toAddr(lsa), toAddr(rsa))
|
|
|
|
return netfd, nil
|
2011-01-11 21:55:17 -07:00
|
|
|
}
|
|
|
|
|
2011-05-30 02:02:59 -06:00
|
|
|
// Unimplemented functions.
|
2010-11-05 12:02:03 -06:00
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
func (fd *netFD) dup() (*os.File, error) {
|
2010-11-05 12:02:03 -06:00
|
|
|
// TODO: Implement this
|
|
|
|
return nil, os.NewSyscallError("dup", syscall.EWINDOWS)
|
|
|
|
}
|
2010-12-07 11:40:14 -07:00
|
|
|
|
2012-02-16 16:04:29 -07:00
|
|
|
// errNoSupport is the error reported by the ReadMsg and WriteMsg
// stubs below, which are not supported on this platform.
var errNoSupport = errors.New("address family not supported")
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
func (fd *netFD) ReadMsg(p []byte, oob []byte) (n, oobn, flags int, sa syscall.Sockaddr, err error) {
|
2012-02-16 16:04:29 -07:00
|
|
|
return 0, 0, 0, nil, errNoSupport
|
2010-12-07 11:40:14 -07:00
|
|
|
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
func (fd *netFD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
|
2012-02-16 16:04:29 -07:00
|
|
|
return 0, 0, errNoSupport
|
2010-12-07 11:40:14 -07:00
|
|
|
}
|