2010-06-29 21:23:39 -06:00
|
|
|
// Copyright 2010 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
package net
|
|
|
|
|
|
|
|
import (
|
2012-02-13 22:57:57 -07:00
|
|
|
"errors"
|
2011-11-01 20:05:34 -06:00
|
|
|
"io"
|
2010-06-29 21:23:39 -06:00
|
|
|
"os"
|
2011-01-19 12:49:25 -07:00
|
|
|
"runtime"
|
2010-06-29 21:23:39 -06:00
|
|
|
"sync"
|
|
|
|
"syscall"
|
2011-01-19 12:49:25 -07:00
|
|
|
"time"
|
2010-06-29 21:23:39 -06:00
|
|
|
"unsafe"
|
|
|
|
)
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
// initErr records any failure from WSAStartup during sysInit;
// newFD checks it and refuses to create descriptors if it is set.
var initErr error
|
2011-02-22 20:40:24 -07:00
|
|
|
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// The CancelIo Windows API cancels all outstanding IO for a particular
// socket on the current thread. To overcome that limitation, we run a
// special goroutine, locked to a single OS thread, that both starts
// and cancels IO. It means there are 2 unavoidable thread switches
// for every IO.
// Some newer versions of Windows have a new CancelIoEx API that does
// not have that limitation and can be used from any thread. This
// package uses the CancelIoEx API if present, otherwise it falls back
// to CancelIo.

var canCancelIO bool // determines if CancelIoEx API is present
|
|
|
|
|
2012-11-06 22:58:20 -07:00
|
|
|
func sysInit() {
|
2011-02-22 20:40:24 -07:00
|
|
|
var d syscall.WSAData
|
2011-09-08 00:32:40 -06:00
|
|
|
e := syscall.WSAStartup(uint32(0x202), &d)
|
2011-12-07 18:07:21 -07:00
|
|
|
if e != nil {
|
|
|
|
initErr = os.NewSyscallError("WSAStartup", e)
|
2011-02-22 20:40:24 -07:00
|
|
|
}
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
canCancelIO = syscall.LoadCancelIoEx() == nil
|
2012-11-06 22:58:20 -07:00
|
|
|
if syscall.LoadGetAddrInfo() == nil {
|
2013-01-17 23:05:04 -07:00
|
|
|
lookupPort = newLookupPort
|
2012-11-06 22:58:20 -07:00
|
|
|
lookupIP = newLookupIP
|
|
|
|
}
|
2011-02-22 20:40:24 -07:00
|
|
|
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
// closesocket releases the socket handle s via the Winsock
// closesocket call.
func closesocket(s syscall.Handle) error {
	return syscall.Closesocket(s)
}
|
|
|
|
|
2013-01-10 18:42:09 -07:00
|
|
|
func canUseConnectEx(net string) bool {
|
|
|
|
if net == "udp" || net == "udp4" || net == "udp6" {
|
|
|
|
// ConnectEx windows API does not support connectionless sockets.
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
return syscall.LoadConnectEx() == nil
|
|
|
|
}
|
|
|
|
|
2013-04-02 14:24:16 -06:00
|
|
|
func resolveAndDial(net, addr string, localAddr Addr, deadline time.Time) (Conn, error) {
|
2013-01-10 18:42:09 -07:00
|
|
|
if !canUseConnectEx(net) {
|
|
|
|
// Use the relatively inefficient goroutine-racing
|
|
|
|
// implementation of DialTimeout.
|
2013-04-02 14:24:16 -06:00
|
|
|
return resolveAndDialChannel(net, addr, localAddr, deadline)
|
2013-01-10 18:42:09 -07:00
|
|
|
}
|
2013-02-08 05:53:10 -07:00
|
|
|
ra, err := resolveAddr("dial", net, addr, deadline)
|
2013-01-10 18:42:09 -07:00
|
|
|
if err != nil {
|
|
|
|
return nil, err
|
|
|
|
}
|
2013-04-02 14:24:16 -06:00
|
|
|
return dial(net, addr, localAddr, ra, deadline)
|
2013-01-10 18:42:09 -07:00
|
|
|
}
|
|
|
|
|
2013-08-06 04:40:10 -06:00
|
|
|
// operation contains superset of data necessary to perform all async IO.
type operation struct {
	// Used by IOCP interface, it must be first field
	// of the struct, as our code rely on it.
	o syscall.Overlapped

	// fields used by runtime.netpoll
	runtimeCtx uintptr // pollDesc context registered with the runtime poller
	mode       int32   // 'r' or 'w' (set once in newFD)
	errno      int32   // completion errno, filled in by the runtime
	qty        uint32  // bytes transferred, filled in by the runtime

	// fields used only by net package
	mu   sync.Mutex // serializes users of this operation (e.g. connect)
	fd   *netFD     // descriptor this operation belongs to
	errc chan error // result channel for the ProcessRemoteIO fallback path
	buf  syscall.WSABuf
	sa   syscall.Sockaddr // remote address for ConnectEx (see connect)
	// NOTE(review): rsa, rsan, handle and flags are not referenced in this
	// chunk; presumably used by accept/readFrom paths elsewhere — verify.
	rsa    *syscall.RawSockaddrAny
	rsan   int32
	handle syscall.Handle
	flags  uint32
}
|
2010-06-29 21:23:39 -06:00
|
|
|
|
2013-08-06 04:40:10 -06:00
|
|
|
func (o *operation) InitBuf(buf []byte) {
|
2011-02-22 20:40:24 -07:00
|
|
|
o.buf.Len = uint32(len(buf))
|
2013-08-06 04:40:10 -06:00
|
|
|
o.buf.Buf = nil
|
|
|
|
if len(buf) != 0 {
|
2011-02-22 20:40:24 -07:00
|
|
|
o.buf.Buf = (*byte)(unsafe.Pointer(&buf[0]))
|
|
|
|
}
|
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// ioSrv executes net IO requests.
type ioSrv struct {
	// req carries requests to the ProcessRemoteIO goroutine.
	// It is only created (in startServer) when CancelIoEx is
	// unavailable; otherwise it stays nil and IO is submitted inline.
	req chan ioSrvReq
}
|
|
|
|
|
|
|
|
// ioSrvReq is a single request sent to the ProcessRemoteIO goroutine.
// The result of the submit (or cancellation) is delivered on o.errc.
type ioSrvReq struct {
	o      *operation
	submit func(o *operation) error // if nil, cancel the operation
}
|
|
|
|
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// ProcessRemoteIO will execute submit IO requests on behalf
|
2011-02-22 20:40:24 -07:00
|
|
|
// of other goroutines, all on a single os thread, so it can
|
|
|
|
// cancel them later. Results of all operations will be sent
|
|
|
|
// back to their requesters via channel supplied in request.
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
// It is used only when the CancelIoEx API is unavailable.
|
2011-02-22 20:40:24 -07:00
|
|
|
func (s *ioSrv) ProcessRemoteIO() {
|
|
|
|
runtime.LockOSThread()
|
|
|
|
defer runtime.UnlockOSThread()
|
2013-08-06 04:40:10 -06:00
|
|
|
for r := range s.req {
|
|
|
|
if r.submit != nil {
|
|
|
|
r.o.errc <- r.submit(r.o)
|
|
|
|
} else {
|
|
|
|
r.o.errc <- syscall.CancelIo(r.o.fd.sysfd)
|
2011-02-22 20:40:24 -07:00
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2013-08-06 04:40:10 -06:00
|
|
|
// ExecIO executes a single IO operation o. It submits and cancels
// IO in the current thread for systems where the Windows CancelIoEx API
// is available. Alternatively, it passes the request onto the
// ProcessRemoteIO thread and waits for completion or cancels the request.
// On success it returns the number of bytes transferred (o.qty).
func (s *ioSrv) ExecIO(o *operation, name string, submit func(o *operation) error) (int, error) {
	fd := o.fd
	// Notify runtime netpoll about starting IO.
	err := fd.pd.Prepare(int(o.mode))
	if err != nil {
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// Start IO.
	if canCancelIO {
		// CancelIoEx works from any thread, so submit directly here.
		err = submit(o)
	} else {
		// Send request to a special dedicated thread,
		// so it can stop the IO with CancelIO later.
		s.req <- ioSrvReq{o, submit}
		err = <-o.errc
	}
	switch err {
	case nil:
		// IO completed immediately, but we need to get our completion message anyway.
	case syscall.ERROR_IO_PENDING:
		// IO started, and we have to wait for its completion.
		err = nil
	default:
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// Wait for our request to complete.
	err = fd.pd.Wait(int(o.mode))
	if err == nil {
		// All is good. Extract our IO results and return.
		if o.errno != 0 {
			err = syscall.Errno(o.errno)
			return 0, &OpError{name, fd.net, fd.laddr, err}
		}
		return int(o.qty), nil
	}
	// IO is interrupted by "close" or "timeout"
	netpollErr := err
	switch netpollErr {
	case errClosing, errTimeout:
		// will deal with those below.
	default:
		panic("net: unexpected runtime.netpoll error: " + netpollErr.Error())
	}
	// Cancel our request.
	if canCancelIO {
		err := syscall.CancelIoEx(fd.sysfd, &o.o)
		// Assuming ERROR_NOT_FOUND is returned, if IO is completed.
		if err != nil && err != syscall.ERROR_NOT_FOUND {
			// TODO(brainman): maybe do something else, but panic.
			panic(err)
		}
	} else {
		// Ask the ProcessRemoteIO thread (which submitted the IO) to
		// cancel it; a nil submit function signals cancellation.
		s.req <- ioSrvReq{o, nil}
		<-o.errc
	}
	// Wait for cancellation to complete.
	fd.pd.WaitCanceled(int(o.mode))
	if o.errno != 0 {
		err = syscall.Errno(o.errno)
		if err == syscall.ERROR_OPERATION_ABORTED { // IO Canceled
			// Report the close/timeout reason instead of the raw abort.
			err = netpollErr
		}
		return 0, &OpError{name, fd.net, fd.laddr, err}
	}
	// We issued cancellation request. But, it seems, IO operation succeeded
	// before cancellation request run. We need to treat IO operation as
	// succeeded (the bytes are actually sent/recv from network).
	return int(o.qty), nil
}
|
|
|
|
|
2011-02-22 20:40:24 -07:00
|
|
|
// Start helper goroutines.
// iosrv is the process-wide IO server; onceStartServer guarantees it is
// initialized exactly once, on first use in newFD.
var iosrv *ioSrv
var onceStartServer sync.Once
|
2010-06-29 21:23:39 -06:00
|
|
|
|
|
|
|
func startServer() {
|
2011-02-22 20:40:24 -07:00
|
|
|
iosrv = new(ioSrv)
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
if !canCancelIO {
|
|
|
|
// Only CancelIo API is available. Lets start special goroutine
|
|
|
|
// locked to an OS thread, that both starts and cancels IO.
|
2013-08-06 04:40:10 -06:00
|
|
|
iosrv.req = make(chan ioSrvReq)
|
net: fix connection resets when closed on windows
It is common to close network connection while another goroutine is
blocked reading on another goroutine. This sequence corresponds to
windows calls to WSARecv to start io, followed by GetQueuedCompletionStatus
that blocks until io completes, and, finally, closesocket called from
another thread. We were expecting that closesocket would unblock
GetQueuedCompletionStatus, and it does, but not always
(http://code.google.com/p/go/issues/detail?id=4170#c5). Also that sequence
results in connection is being reset.
This CL inserts CancelIo between GetQueuedCompletionStatus and closesocket,
and waits for both WSARecv and GetQueuedCompletionStatus to complete before
proceeding to closesocket. This seems to fix both connection resets and
issue 4170. It also makes windows code behave similar to unix version.
Unfortunately, CancelIo needs to be called on the same thread as WSARecv.
So we have to employ strategy we use for connections with deadlines to
every connection now. It means, there are 2 unavoidable thread switches
for every io. Some newer versions of windows have new CancelIoEx api that
doesn't have these drawbacks, and this CL uses this capability when available.
As time goes by, we should have less of CancelIo and more of CancelIoEx
systems. Computers with CancelIoEx are also not affected by issue 4195 anymore.
Fixes #3710
Fixes #3746
Fixes #4170
Partial fix for issue 4195
R=golang-dev, mikioh.mikioh, bradfitz, rsc
CC=golang-dev
https://golang.org/cl/6604072
2012-10-30 17:24:37 -06:00
|
|
|
go iosrv.ProcessRemoteIO()
|
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2011-02-22 20:40:24 -07:00
|
|
|
// Network file descriptor.
type netFD struct {
	// locking/lifetime of sysfd
	sysmu   sync.Mutex
	sysref  int  // reference count of in-flight operations
	closing bool // set once Close has been requested

	// immutable until Close
	sysfd       syscall.Handle
	family      int
	sotype      int
	isConnected bool
	net         string
	laddr       Addr
	raddr       Addr

	rop operation // read operation
	wop operation // write operation

	// wait server
	pd pollDesc
}
|
|
|
|
|
2013-08-06 04:40:10 -06:00
|
|
|
func newFD(sysfd syscall.Handle, family, sotype int, net string) (*netFD, error) {
|
2013-07-21 20:49:57 -06:00
|
|
|
if initErr != nil {
|
|
|
|
return nil, initErr
|
|
|
|
}
|
|
|
|
onceStartServer.Do(startServer)
|
2013-08-06 04:40:10 -06:00
|
|
|
fd := &netFD{
|
|
|
|
sysfd: sysfd,
|
2010-06-29 21:23:39 -06:00
|
|
|
family: family,
|
2012-01-19 16:33:37 -07:00
|
|
|
sotype: sotype,
|
2010-06-29 21:23:39 -06:00
|
|
|
net: net,
|
2011-02-22 20:40:24 -07:00
|
|
|
}
|
2013-08-06 04:40:10 -06:00
|
|
|
if err := fd.pd.Init(fd); err != nil {
|
2012-01-31 08:36:45 -07:00
|
|
|
return nil, err
|
2011-03-28 21:40:01 -06:00
|
|
|
}
|
2013-08-06 04:40:10 -06:00
|
|
|
fd.rop.mode = 'r'
|
|
|
|
fd.wop.mode = 'w'
|
|
|
|
fd.rop.fd = fd
|
|
|
|
fd.wop.fd = fd
|
|
|
|
fd.rop.runtimeCtx = fd.pd.runtimeCtx
|
|
|
|
fd.wop.runtimeCtx = fd.pd.runtimeCtx
|
|
|
|
if !canCancelIO {
|
|
|
|
fd.rop.errc = make(chan error)
|
|
|
|
fd.rop.errc = make(chan error)
|
|
|
|
}
|
|
|
|
return fd, nil
|
2011-03-28 21:40:01 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
func (fd *netFD) setAddr(laddr, raddr Addr) {
|
|
|
|
fd.laddr = laddr
|
|
|
|
fd.raddr = raddr
|
2013-07-29 10:01:13 -06:00
|
|
|
runtime.SetFinalizer(fd, (*netFD).Close)
|
2011-03-28 21:40:01 -06:00
|
|
|
}
|
|
|
|
|
2013-04-30 18:47:39 -06:00
|
|
|
// connect connects the socket to the remote address ra, using the
// optional local address la. Where ConnectEx is usable for this network
// it submits an overlapped ConnectEx through the IO server; otherwise it
// falls back to a plain blocking Connect system call.
func (fd *netFD) connect(la, ra syscall.Sockaddr) error {
	if !canUseConnectEx(fd.net) {
		return syscall.Connect(fd.sysfd, ra)
	}
	// ConnectEx windows API requires an unconnected, previously bound socket.
	if la == nil {
		// No local address supplied: bind to the zero-value (wildcard)
		// address of the same family as the remote address.
		switch ra.(type) {
		case *syscall.SockaddrInet4:
			la = &syscall.SockaddrInet4{}
		case *syscall.SockaddrInet6:
			la = &syscall.SockaddrInet6{}
		default:
			panic("unexpected type in connect")
		}
		if err := syscall.Bind(fd.sysfd, la); err != nil {
			return err
		}
	}
	// Call ConnectEx API.
	// Serialize with other writers via the write operation's mutex.
	o := &fd.wop
	o.mu.Lock()
	defer o.mu.Unlock()
	o.sa = ra
	_, err := iosrv.ExecIO(o, "ConnectEx", func(o *operation) error {
		return syscall.ConnectEx(o.fd.sysfd, o.sa, nil, 0, nil, &o.o)
	})
	if err != nil {
		return err
	}
	// Refresh socket properties.
	return syscall.Setsockopt(fd.sysfd, syscall.SOL_SOCKET, syscall.SO_UPDATE_CONNECT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
}
|
|
|
|
|
|
|
|
// Add a reference to this fd.
|
2012-02-13 22:40:37 -07:00
|
|
|
// If closing==true, mark the fd as closing.
|
|
|
|
// Returns an error if the fd cannot be used.
|
|
|
|
func (fd *netFD) incref(closing bool) error {
|
|
|
|
if fd == nil {
|
|
|
|
return errClosing
|
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
fd.sysmu.Lock()
|
2012-02-13 22:40:37 -07:00
|
|
|
if fd.closing {
|
|
|
|
fd.sysmu.Unlock()
|
|
|
|
return errClosing
|
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
fd.sysref++
|
2012-02-13 22:40:37 -07:00
|
|
|
if closing {
|
|
|
|
fd.closing = true
|
|
|
|
}
|
|
|
|
closing = fd.closing
|
2010-06-29 21:23:39 -06:00
|
|
|
fd.sysmu.Unlock()
|
2012-02-13 22:40:37 -07:00
|
|
|
return nil
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
|
|
|
// Remove a reference to this FD and close if we've been asked to do so
// (and there are no references left).
func (fd *netFD) decref() {
	if fd == nil {
		return
	}
	fd.sysmu.Lock()
	fd.sysref--
	// Only the final decref of a closing fd actually releases the socket.
	if fd.closing && fd.sysref == 0 && fd.sysfd != syscall.InvalidHandle {
		// Poller may want to unregister fd in readiness notification mechanism,
		// so this must be executed before closesocket.
		fd.pd.Close()
		closesocket(fd.sysfd)
		// Mark the handle invalid so a racing decref cannot close it twice.
		fd.sysfd = syscall.InvalidHandle
		// no need for a finalizer anymore
		runtime.SetFinalizer(fd, nil)
	}
	fd.sysmu.Unlock()
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
// Close marks the fd as closing, kicks out any goroutines blocked in IO,
// and waits for in-flight read and write operations to finish. The actual
// socket close happens in decref once the last reference is released.
func (fd *netFD) Close() error {
	// incref(true) sets fd.closing, so no new IO can start after this.
	if err := fd.incref(true); err != nil {
		return err
	}
	defer fd.decref()
	// unblock pending reader and writer
	fd.pd.Evict()
	// wait for both reader and writer to exit
	// Acquiring then releasing each operation mutex guarantees any
	// goroutine that held it (i.e. an in-flight Read/Write) has returned.
	fd.rop.mu.Lock()
	fd.wop.mu.Lock()
	fd.rop.mu.Unlock()
	fd.wop.mu.Unlock()
	return nil
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
func (fd *netFD) shutdown(how int) error {
|
2012-11-02 03:46:47 -06:00
|
|
|
if err := fd.incref(false); err != nil {
|
|
|
|
return err
|
2011-09-28 09:12:38 -06:00
|
|
|
}
|
2012-11-02 03:46:47 -06:00
|
|
|
defer fd.decref()
|
2011-11-13 20:44:52 -07:00
|
|
|
err := syscall.Shutdown(fd.sysfd, how)
|
|
|
|
if err != nil {
|
|
|
|
return &OpError{"shutdown", fd.net, fd.laddr, err}
|
2011-10-12 11:45:25 -06:00
|
|
|
}
|
2011-09-28 09:12:38 -06:00
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
// CloseRead shuts down the reading side of the connection.
func (fd *netFD) CloseRead() error {
	return fd.shutdown(syscall.SHUT_RD)
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
// CloseWrite shuts down the writing side of the connection.
func (fd *netFD) CloseWrite() error {
	return fd.shutdown(syscall.SHUT_WR)
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
func (fd *netFD) Read(buf []byte) (int, error) {
|
2012-02-13 22:40:37 -07:00
|
|
|
if err := fd.incref(false); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2010-06-29 21:23:39 -06:00
|
|
|
defer fd.decref()
|
2013-08-06 04:40:10 -06:00
|
|
|
o := &fd.rop
|
|
|
|
o.mu.Lock()
|
|
|
|
defer o.mu.Unlock()
|
|
|
|
o.InitBuf(buf)
|
|
|
|
n, err := iosrv.ExecIO(o, "WSARecv", func(o *operation) error {
|
|
|
|
return syscall.WSARecv(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, &o.o, nil)
|
|
|
|
})
|
2010-07-21 00:51:07 -06:00
|
|
|
if err == nil && n == 0 {
|
2011-11-01 20:05:34 -06:00
|
|
|
err = io.EOF
|
2010-07-21 00:51:07 -06:00
|
|
|
}
|
2012-01-31 08:36:45 -07:00
|
|
|
return n, err
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
// ReadFrom reads a packet into buf via an overlapped WSARecvFrom and
// returns the number of bytes read together with the sender's address.
// A zero-length buf returns immediately with no IO.
func (fd *netFD) ReadFrom(buf []byte) (n int, sa syscall.Sockaddr, err error) {
	if len(buf) == 0 {
		return 0, nil, nil
	}
	if err := fd.incref(false); err != nil {
		return 0, nil, err
	}
	defer fd.decref()
	o := &fd.rop
	o.mu.Lock()
	defer o.mu.Unlock()
	o.InitBuf(buf)
	n, err = iosrv.ExecIO(o, "WSARecvFrom", func(o *operation) error {
		// Allocate the raw sockaddr buffer lazily and reuse it across calls.
		if o.rsa == nil {
			o.rsa = new(syscall.RawSockaddrAny)
		}
		o.rsan = int32(unsafe.Sizeof(*o.rsa))
		return syscall.WSARecvFrom(o.fd.sysfd, &o.buf, 1, &o.qty, &o.flags, o.rsa, &o.rsan, &o.o, nil)
	})
	if err != nil {
		return 0, nil, err
	}
	// Convert the raw sockaddr filled in by the kernel into a Sockaddr.
	sa, _ = o.rsa.Sockaddr()
	return
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
func (fd *netFD) Write(buf []byte) (int, error) {
|
2012-02-13 22:40:37 -07:00
|
|
|
if err := fd.incref(false); err != nil {
|
|
|
|
return 0, err
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
2012-02-13 22:40:37 -07:00
|
|
|
defer fd.decref()
|
2013-08-06 04:40:10 -06:00
|
|
|
o := &fd.wop
|
|
|
|
o.mu.Lock()
|
|
|
|
defer o.mu.Unlock()
|
|
|
|
o.InitBuf(buf)
|
|
|
|
return iosrv.ExecIO(o, "WSASend", func(o *operation) error {
|
|
|
|
return syscall.WSASend(o.fd.sysfd, &o.buf, 1, &o.qty, 0, &o.o, nil)
|
|
|
|
})
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
func (fd *netFD) WriteTo(buf []byte, sa syscall.Sockaddr) (int, error) {
|
2011-02-22 20:40:24 -07:00
|
|
|
if len(buf) == 0 {
|
2010-11-22 09:01:30 -07:00
|
|
|
return 0, nil
|
|
|
|
}
|
2012-02-13 22:40:37 -07:00
|
|
|
if err := fd.incref(false); err != nil {
|
|
|
|
return 0, err
|
|
|
|
}
|
2010-11-22 09:01:30 -07:00
|
|
|
defer fd.decref()
|
2013-08-06 04:40:10 -06:00
|
|
|
o := &fd.wop
|
|
|
|
o.mu.Lock()
|
|
|
|
defer o.mu.Unlock()
|
|
|
|
o.InitBuf(buf)
|
2011-02-22 20:40:24 -07:00
|
|
|
o.sa = sa
|
2013-08-06 04:40:10 -06:00
|
|
|
return iosrv.ExecIO(o, "WSASendto", func(o *operation) error {
|
|
|
|
return syscall.WSASendto(o.fd.sysfd, &o.buf, 1, &o.qty, 0, o.sa, &o.o, nil)
|
|
|
|
})
|
2010-06-29 21:23:39 -06:00
|
|
|
}
|
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
// accept waits for the next incoming connection using AcceptEx, wraps it
// in a new netFD, and uses toAddr to convert the raw socket addresses
// into the caller's Addr type.
func (fd *netFD) accept(toAddr func(syscall.Sockaddr) Addr) (*netFD, error) {
	if err := fd.incref(false); err != nil {
		return nil, err
	}
	defer fd.decref()

	// Get new socket.
	s, err := sysSocket(fd.family, fd.sotype, 0)
	if err != nil {
		return nil, &OpError{"socket", fd.net, fd.laddr, err}
	}

	// Associate our new socket with IOCP.
	netfd, err := newFD(s, fd.family, fd.sotype, fd.net)
	if err != nil {
		closesocket(s)
		return nil, &OpError{"accept", fd.net, fd.laddr, err}
	}

	// Submit accept request.
	o := &fd.rop
	o.mu.Lock()
	defer o.mu.Unlock()
	o.handle = s
	// AcceptEx writes both the local and remote raw sockaddrs into this
	// buffer; it must outlive the overlapped call.
	var rawsa [2]syscall.RawSockaddrAny
	o.rsan = int32(unsafe.Sizeof(rawsa[0]))
	_, err = iosrv.ExecIO(o, "AcceptEx", func(o *operation) error {
		return syscall.AcceptEx(o.fd.sysfd, o.handle, (*byte)(unsafe.Pointer(&rawsa[0])), 0, uint32(o.rsan), uint32(o.rsan), &o.qty, &o.o)
	})
	if err != nil {
		// netfd.Close also closes the socket handle s.
		netfd.Close()
		return nil, err
	}

	// Inherit properties of the listening socket.
	err = syscall.Setsockopt(s, syscall.SOL_SOCKET, syscall.SO_UPDATE_ACCEPT_CONTEXT, (*byte)(unsafe.Pointer(&fd.sysfd)), int32(unsafe.Sizeof(fd.sysfd)))
	if err != nil {
		netfd.Close()
		return nil, &OpError{"Setsockopt", fd.net, fd.laddr, err}
	}

	// Get local and peer addr out of AcceptEx buffer.
	var lrsa, rrsa *syscall.RawSockaddrAny
	var llen, rlen int32
	syscall.GetAcceptExSockaddrs((*byte)(unsafe.Pointer(&rawsa[0])),
		0, uint32(o.rsan), uint32(o.rsan), &lrsa, &llen, &rrsa, &rlen)
	lsa, _ := lrsa.Sockaddr()
	rsa, _ := rrsa.Sockaddr()

	netfd.setAddr(toAddr(lsa), toAddr(rsa))
	return netfd, nil
}
|
|
|
|
|
2011-05-30 02:02:59 -06:00
|
|
|
// Unimplemented functions.
|
2010-11-05 12:02:03 -06:00
|
|
|
|
2012-01-31 08:36:45 -07:00
|
|
|
// dup duplicates the fd as an *os.File. Not implemented on Windows;
// it always fails with EWINDOWS.
func (fd *netFD) dup() (*os.File, error) {
	// TODO: Implement this
	return nil, os.NewSyscallError("dup", syscall.EWINDOWS)
}
|
2010-12-07 11:40:14 -07:00
|
|
|
|
2012-02-16 16:04:29 -07:00
|
|
|
// errNoSupport is returned by the unimplemented ReadMsg and WriteMsg.
var errNoSupport = errors.New("address family not supported")
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
func (fd *netFD) ReadMsg(p []byte, oob []byte) (n, oobn, flags int, sa syscall.Sockaddr, err error) {
|
2012-02-16 16:04:29 -07:00
|
|
|
return 0, 0, 0, nil, errNoSupport
|
2010-12-07 11:40:14 -07:00
|
|
|
}
|
|
|
|
|
2011-11-01 20:05:34 -06:00
|
|
|
func (fd *netFD) WriteMsg(p []byte, oob []byte, sa syscall.Sockaddr) (n int, oobn int, err error) {
|
2012-02-16 16:04:29 -07:00
|
|
|
return 0, 0, errNoSupport
|
2010-12-07 11:40:14 -07:00
|
|
|
}
|