2014-09-04 00:04:04 -06:00
|
|
|
// Copyright 2013 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
|
|
|
|
|
|
|
|
package runtime
|
|
|
|
|
2015-11-02 12:09:24 -07:00
|
|
|
import (
|
|
|
|
"runtime/internal/atomic"
|
|
|
|
"unsafe"
|
|
|
|
)
|
2014-09-04 00:04:04 -06:00
|
|
|
|
|
|
|
// Integrated network poller (platform-independent part).
|
|
|
|
// A particular implementation (epoll/kqueue) must define the following functions:
|
|
|
|
// func netpollinit() // to initialize the poller
|
|
|
|
// func netpollopen(fd uintptr, pd *pollDesc) int32 // to arm edge-triggered notifications
|
|
|
|
// and associate fd with pd.
|
|
|
|
// An implementation must call the following function to denote that the pd is ready.
|
2014-09-04 01:23:37 -06:00
|
|
|
// func netpollready(gpp **g, pd *pollDesc, mode int32)
|
2014-09-04 00:04:04 -06:00
|
|
|
|
|
|
|
// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
|
|
|
|
// goroutines respectively. The semaphore can be in the following states:
|
|
|
|
// pdReady - io readiness notification is pending;
|
|
|
|
// a goroutine consumes the notification by changing the state to nil.
|
|
|
|
// pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
|
|
|
|
// the goroutine commits to park by changing the state to G pointer,
|
|
|
|
// or, alternatively, concurrent io notification changes the state to READY,
|
|
|
|
// or, alternatively, concurrent timeout/close changes the state to nil.
|
|
|
|
// G pointer - the goroutine is blocked on the semaphore;
|
|
|
|
// io notification or timeout/close changes the state to READY or nil respectively
|
|
|
|
// and unparks the goroutine.
|
|
|
|
// nil - nothing of the above.
|
|
|
|
// States of the rg/wg binary semaphores in pollDesc
// (see the state-machine comment above).
const (
	pdReady uintptr = 1 // io readiness notification is pending
	pdWait  uintptr = 2 // a goroutine is preparing to park on the semaphore
)
|
|
|
|
|
|
|
|
// pollBlockSize is the allocation unit (in bytes) used by pollCache.alloc
// when replenishing the free list of pollDesc structures.
const pollBlockSize = 4 * 1024
|
|
|
|
|
|
|
|
// Network poller descriptor.
|
2016-10-11 20:58:21 -06:00
|
|
|
//
|
|
|
|
// No heap pointers.
|
|
|
|
//
|
|
|
|
//go:notinheap
|
2014-09-04 00:04:04 -06:00
|
|
|
type pollDesc struct {
	link *pollDesc // in pollcache, protected by pollcache.lock

	// The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
	// This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
	// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
	// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
	// in a lock-free way by all operations.
	// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
	// that will blow up when GC starts moving objects.
	lock    mutex // protects the following fields
	fd      uintptr
	closing bool
	seq     uintptr // protects from stale timers and ready notifications
	rg      uintptr // pdReady, pdWait, G waiting for read or nil
	rt      timer   // read deadline timer (set if rt.f != nil)
	rd      int64   // read deadline
	wg      uintptr // pdReady, pdWait, G waiting for write or nil
	wt      timer   // write deadline timer
	wd      int64   // write deadline
	user    uint32  // user settable cookie
}
|
|
|
|
|
|
|
|
// pollCache is a free list of pollDesc structures, replenished from
// non-GC memory by alloc (see pollCache.alloc).
type pollCache struct {
	lock  mutex
	first *pollDesc
	// PollDesc objects must be type-stable,
	// because we can get ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using seq variable,
	// seq is incremented when deadlines are changed or descriptor is reused.
}
|
|
|
|
|
2015-01-13 10:12:50 -07:00
|
|
|
var (
	netpollInited  uint32 // set to 1 once poll_runtime_pollServerInit has run netpollinit
	pollcache      pollCache
	netpollWaiters uint32 // count of goroutines parked waiting for the poller (see netpollblockcommit)
)
|
2014-09-04 00:04:04 -06:00
|
|
|
|
2017-02-10 15:59:38 -07:00
|
|
|
//go:linkname poll_runtime_pollServerInit internal/poll.runtime_pollServerInit

// poll_runtime_pollServerInit initializes the platform poller and then
// publishes the netpollInited flag, so netpollinited reports true only
// after netpollinit has completed.
func poll_runtime_pollServerInit() {
	netpollinit()
	atomic.Store(&netpollInited, 1)
}
|
|
|
|
|
|
|
|
// netpollinited reports whether the network poller has been initialized.
func netpollinited() bool {
	return atomic.Load(&netpollInited) != 0
}
|
|
|
|
|
2017-02-10 16:17:38 -07:00
|
|
|
//go:linkname poll_runtime_pollServerDescriptor internal/poll.runtime_pollServerDescriptor

// poll_runtime_pollServerDescriptor returns the descriptor being used,
// or ^uintptr(0) if the system does not use a poll descriptor.
func poll_runtime_pollServerDescriptor() uintptr {
	return netpolldescriptor()
}
|
|
|
|
|
2017-02-10 15:59:38 -07:00
|
|
|
//go:linkname poll_runtime_pollOpen internal/poll.runtime_pollOpen

// poll_runtime_pollOpen allocates a pollDesc for fd, resets its state,
// and registers fd with the platform poller via netpollopen. It returns
// the descriptor and the errno from netpollopen (0 on success).
func poll_runtime_pollOpen(fd uintptr) (*pollDesc, int) {
	pd := pollcache.alloc()
	lock(&pd.lock)
	// A descriptor coming off the free list must not still have a
	// goroutine parked on either semaphore.
	if pd.wg != 0 && pd.wg != pdReady {
		throw("runtime: blocked write on free polldesc")
	}
	if pd.rg != 0 && pd.rg != pdReady {
		throw("runtime: blocked read on free polldesc")
	}
	pd.fd = fd
	pd.closing = false
	pd.seq++ // invalidate stale timers/ready notifications from the previous user
	pd.rg = 0
	pd.rd = 0
	pd.wg = 0
	pd.wd = 0
	unlock(&pd.lock)

	var errno int32
	errno = netpollopen(fd, pd)
	return pd, int(errno)
}
|
|
|
|
|
2017-02-10 15:59:38 -07:00
|
|
|
//go:linkname poll_runtime_pollClose internal/poll.runtime_pollClose
|
|
|
|
func poll_runtime_pollClose(pd *pollDesc) {
|
2014-09-04 00:04:04 -06:00
|
|
|
if !pd.closing {
|
2017-04-24 03:37:48 -06:00
|
|
|
throw("runtime: close polldesc w/o unblock")
|
2014-09-04 00:04:04 -06:00
|
|
|
}
|
|
|
|
if pd.wg != 0 && pd.wg != pdReady {
|
2017-04-24 03:37:48 -06:00
|
|
|
throw("runtime: blocked write on closing polldesc")
|
2014-09-04 00:04:04 -06:00
|
|
|
}
|
|
|
|
if pd.rg != 0 && pd.rg != pdReady {
|
2017-04-24 03:37:48 -06:00
|
|
|
throw("runtime: blocked read on closing polldesc")
|
2014-09-04 00:04:04 -06:00
|
|
|
}
|
2016-02-29 16:01:00 -07:00
|
|
|
netpollclose(pd.fd)
|
2014-09-04 00:04:04 -06:00
|
|
|
pollcache.free(pd)
|
|
|
|
}
|
|
|
|
|
|
|
|
// free pushes pd back onto the cache's free list for reuse.
// Called from poll_runtime_pollClose after the fd has been
// unregistered from the poller.
func (c *pollCache) free(pd *pollDesc) {
	lock(&c.lock)
	pd.link = c.first
	c.first = pd
	unlock(&c.lock)
}
|
|
|
|
|
2017-02-10 15:59:38 -07:00
|
|
|
//go:linkname poll_runtime_pollReset internal/poll.runtime_pollReset
|
|
|
|
func poll_runtime_pollReset(pd *pollDesc, mode int) int {
|
2014-09-04 00:04:04 -06:00
|
|
|
err := netpollcheckerr(pd, int32(mode))
|
|
|
|
if err != 0 {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
if mode == 'r' {
|
|
|
|
pd.rg = 0
|
|
|
|
} else if mode == 'w' {
|
|
|
|
pd.wg = 0
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2017-02-10 15:59:38 -07:00
|
|
|
//go:linkname poll_runtime_pollWait internal/poll.runtime_pollWait
|
|
|
|
func poll_runtime_pollWait(pd *pollDesc, mode int) int {
|
2014-09-04 00:04:04 -06:00
|
|
|
err := netpollcheckerr(pd, int32(mode))
|
|
|
|
if err != 0 {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// As for now only Solaris uses level-triggered IO.
|
|
|
|
if GOOS == "solaris" {
|
[dev.cc] runtime: convert Solaris port to Go
Memory management was consolitated with the BSD ports, since
it was almost identical.
Assembly thunks are gone, being replaced by the new //go:linkname
feature.
This change supersedes CL 138390043 (runtime: convert solaris
netpoll to Go), which was previously reviewed and tested.
This change is only the first step, the port now builds,
but doesn't run. Binaries fail to exec:
ld.so.1: 6.out: fatal: 6.out: TLS requirement failure : TLS support is unavailable
Killed
This seems to happen because binaries don't link with libc.so
anymore. We will have to solve that in a different CL.
Also this change is just a rough translation of the original
C code, cleanup will come in a different CL.
[This CL is part of the removal of C code from package runtime.
See golang.org/s/dev.cc for an overview.]
LGTM=rsc
R=rsc, dave
CC=golang-codereviews, iant, khr, minux, r, rlh
https://golang.org/cl/174960043
2014-11-13 08:07:10 -07:00
|
|
|
netpollarm(pd, mode)
|
2014-09-04 00:04:04 -06:00
|
|
|
}
|
|
|
|
for !netpollblock(pd, int32(mode), false) {
|
|
|
|
err = netpollcheckerr(pd, int32(mode))
|
|
|
|
if err != 0 {
|
|
|
|
return err
|
|
|
|
}
|
|
|
|
// Can happen if timeout has fired and unblocked us,
|
|
|
|
// but before we had a chance to run, timeout has been reset.
|
|
|
|
// Pretend it has not happened and retry.
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
2017-02-10 15:59:38 -07:00
|
|
|
//go:linkname poll_runtime_pollWaitCanceled internal/poll.runtime_pollWaitCanceled
|
|
|
|
func poll_runtime_pollWaitCanceled(pd *pollDesc, mode int) {
|
2014-09-04 00:04:04 -06:00
|
|
|
// This function is used only on windows after a failed attempt to cancel
|
|
|
|
// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
|
|
|
|
for !netpollblock(pd, int32(mode), true) {
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-02-10 15:59:38 -07:00
|
|
|
//go:linkname poll_runtime_pollSetDeadline internal/poll.runtime_pollSetDeadline

// poll_runtime_pollSetDeadline sets the read and/or write deadline on pd.
// mode is 'r', 'w', or 'r'+'w'; d is an absolute time in nanoseconds
// (0 means no deadline). A deadline already in the past is recorded as -1
// and immediately unblocks any pending IO.
func poll_runtime_pollSetDeadline(pd *pollDesc, d int64, mode int) {
	lock(&pd.lock)
	if pd.closing {
		// Descriptor is being torn down by pollUnblock; nothing to do.
		unlock(&pd.lock)
		return
	}
	pd.seq++ // invalidate current timers
	// Reset current timers.
	if pd.rt.f != nil {
		deltimer(&pd.rt)
		pd.rt.f = nil
	}
	if pd.wt.f != nil {
		deltimer(&pd.wt)
		pd.wt.f = nil
	}
	// Setup new timers.
	if d != 0 && d <= nanotime() {
		d = -1 // deadline already expired
	}
	if mode == 'r' || mode == 'r'+'w' {
		pd.rd = d
	}
	if mode == 'w' || mode == 'r'+'w' {
		pd.wd = d
	}
	if pd.rd > 0 && pd.rd == pd.wd {
		// Read and write deadlines coincide: use a single combined
		// timer (stored in rt only).
		pd.rt.f = netpollDeadline
		pd.rt.when = pd.rd
		// Copy current seq into the timer arg.
		// Timer func will check the seq against current descriptor seq,
		// if they differ the descriptor was reused or timers were reset.
		pd.rt.arg = pd
		pd.rt.seq = pd.seq
		addtimer(&pd.rt)
	} else {
		if pd.rd > 0 {
			pd.rt.f = netpollReadDeadline
			pd.rt.when = pd.rd
			pd.rt.arg = pd
			pd.rt.seq = pd.seq
			addtimer(&pd.rt)
		}
		if pd.wd > 0 {
			pd.wt.f = netpollWriteDeadline
			pd.wt.when = pd.wd
			pd.wt.arg = pd
			pd.wt.seq = pd.seq
			addtimer(&pd.wt)
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	var rg, wg *g
	atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
	if pd.rd < 0 {
		rg = netpollunblock(pd, 'r', false)
	}
	if pd.wd < 0 {
		wg = netpollunblock(pd, 'w', false)
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
}
|
|
|
|
|
2017-02-10 15:59:38 -07:00
|
|
|
//go:linkname poll_runtime_pollUnblock internal/poll.runtime_pollUnblock

// poll_runtime_pollUnblock marks pd as closing, cancels any deadline
// timers, and wakes goroutines blocked on either semaphore. It must be
// called exactly once per descriptor, before poll_runtime_pollClose
// (pollClose throws if closing is not set; this throws if it already is).
func poll_runtime_pollUnblock(pd *pollDesc) {
	lock(&pd.lock)
	if pd.closing {
		throw("runtime: unblock on closing polldesc")
	}
	pd.closing = true
	pd.seq++ // invalidate outstanding timers and ready notifications
	var rg, wg *g
	atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
	rg = netpollunblock(pd, 'r', false)
	wg = netpollunblock(pd, 'w', false)
	if pd.rt.f != nil {
		deltimer(&pd.rt)
		pd.rt.f = nil
	}
	if pd.wt.f != nil {
		deltimer(&pd.wt)
		pd.wt.f = nil
	}
	unlock(&pd.lock)
	// Wake the unblocked goroutines outside the lock.
	if rg != nil {
		netpollgoready(rg, 3)
	}
	if wg != nil {
		netpollgoready(wg, 3)
	}
}
|
|
|
|
|
|
|
|
// netpollready makes pd ready for the given mode ('r', 'w', or 'r'+'w');
// newly runnable goroutines (if any) are pushed onto the *gpp list via
// their schedlink fields.
// May run during STW, so write barriers are not allowed.
//go:nowritebarrier
func netpollready(gpp *guintptr, pd *pollDesc, mode int32) {
	var rg, wg guintptr
	if mode == 'r' || mode == 'r'+'w' {
		rg.set(netpollunblock(pd, 'r', true))
	}
	if mode == 'w' || mode == 'r'+'w' {
		wg.set(netpollunblock(pd, 'w', true))
	}
	// Prepend any unblocked goroutines to the caller's list.
	if rg != 0 {
		rg.ptr().schedlink = *gpp
		*gpp = rg
	}
	if wg != 0 {
		wg.ptr().schedlink = *gpp
		*gpp = wg
	}
}
|
|
|
|
|
|
|
|
func netpollcheckerr(pd *pollDesc, mode int32) int {
|
|
|
|
if pd.closing {
|
|
|
|
return 1 // errClosing
|
|
|
|
}
|
|
|
|
if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) {
|
|
|
|
return 2 // errTimeout
|
|
|
|
}
|
|
|
|
return 0
|
|
|
|
}
|
|
|
|
|
|
|
|
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
|
2017-02-10 16:17:38 -07:00
|
|
|
r := atomic.Casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
|
|
|
|
if r {
|
|
|
|
// Bump the count of goroutines waiting for the poller.
|
|
|
|
// The scheduler uses this to decide whether to block
|
|
|
|
// waiting for the poller if there is nothing else to do.
|
|
|
|
atomic.Xadd(&netpollWaiters, 1)
|
|
|
|
}
|
|
|
|
return r
|
|
|
|
}
|
|
|
|
|
|
|
|
// netpollgoready makes gp runnable after it was parked in netpollblock,
// dropping the poller-waiter count that netpollblockcommit bumped.
func netpollgoready(gp *g, traceskip int) {
	atomic.Xadd(&netpollWaiters, -1)
	goready(gp, traceskip+1)
}
|
|
|
|
|
|
|
|
// netpollblock parks the current goroutine on pd's read ('r') or write
// ('w') semaphore.
// returns true if IO is ready, or false if timedout or closed
// waitio - wait only for completed IO, ignore errors
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	// set the gpp semaphore to WAIT
	for {
		old := *gpp
		if old == pdReady {
			// Notification already pending: consume it and return.
			*gpp = 0
			return true
		}
		if old != 0 {
			// Another goroutine is already parked/parking here.
			throw("runtime: double wait")
		}
		if atomic.Casuintptr(gpp, 0, pdWait) {
			break
		}
	}

	// need to recheck error states after setting gpp to WAIT
	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
	// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
	if waitio || netpollcheckerr(pd, mode) == 0 {
		gopark(netpollblockcommit, unsafe.Pointer(gpp), "IO wait", traceEvGoBlockNet, 5)
	}
	// be careful to not lose concurrent READY notification
	old := atomic.Xchguintptr(gpp, 0)
	if old > pdWait {
		// Anything above pdWait would be a *g left behind, which must
		// have been consumed by whoever woke us.
		throw("runtime: corrupted polldesc")
	}
	return old == pdReady
}
|
|
|
|
|
|
|
|
// netpollunblock removes the goroutine (if any) parked on pd's read ('r')
// or write ('w') semaphore and returns it; the caller is responsible for
// making it runnable. If ioready, the semaphore is left in pdReady state
// so a future netpollblock consumes the notification; otherwise it is
// cleared. Returns nil if no goroutine was parked.
func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
	gpp := &pd.rg
	if mode == 'w' {
		gpp = &pd.wg
	}

	for {
		old := *gpp
		if old == pdReady {
			// Notification already pending; no one to wake.
			return nil
		}
		if old == 0 && !ioready {
			// Only set READY for ioready. runtime_pollWait
			// will check for timeout/cancel before waiting.
			return nil
		}
		var new uintptr
		if ioready {
			new = pdReady
		}
		if atomic.Casuintptr(gpp, old, new) {
			if old == pdReady || old == pdWait {
				// Slot held a state marker, not a parked goroutine.
				old = 0
			}
			return (*g)(unsafe.Pointer(old))
		}
	}
}
|
|
|
|
|
|
|
|
// netpolldeadlineimpl is the common body of the deadline timer callbacks.
// seq is the descriptor's seq value captured when the timer was added;
// read/write select which deadline(s) fired.
func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
	lock(&pd.lock)
	// Seq arg is seq when the timer was set.
	// If it's stale, ignore the timer event.
	if seq != pd.seq {
		// The descriptor was reused or timers were reset.
		unlock(&pd.lock)
		return
	}
	var rg *g
	if read {
		if pd.rd <= 0 || pd.rt.f == nil {
			throw("runtime: inconsistent read deadline")
		}
		pd.rd = -1
		atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
		rg = netpollunblock(pd, 'r', false)
	}
	var wg *g
	if write {
		// A combined read+write deadline is stored in rt only
		// (see pollSetDeadline), so wt.f may be nil when read is also set.
		if pd.wd <= 0 || pd.wt.f == nil && !read {
			throw("runtime: inconsistent write deadline")
		}
		pd.wd = -1
		atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
		wg = netpollunblock(pd, 'w', false)
	}
	unlock(&pd.lock)
	if rg != nil {
		netpollgoready(rg, 0)
	}
	if wg != nil {
		netpollgoready(wg, 0)
	}
}
|
|
|
|
|
|
|
|
func netpollDeadline(arg interface{}, seq uintptr) {
|
|
|
|
netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
|
|
|
|
}
|
|
|
|
|
|
|
|
func netpollReadDeadline(arg interface{}, seq uintptr) {
|
|
|
|
netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
|
|
|
|
}
|
|
|
|
|
|
|
|
func netpollWriteDeadline(arg interface{}, seq uintptr) {
|
|
|
|
netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (c *pollCache) alloc() *pollDesc {
|
|
|
|
lock(&c.lock)
|
|
|
|
if c.first == nil {
|
|
|
|
const pdSize = unsafe.Sizeof(pollDesc{})
|
|
|
|
n := pollBlockSize / pdSize
|
|
|
|
if n == 0 {
|
|
|
|
n = 1
|
|
|
|
}
|
|
|
|
// Must be in non-GC memory because can be referenced
|
|
|
|
// only from epoll/kqueue internals.
|
|
|
|
mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
|
|
|
|
for i := uintptr(0); i < n; i++ {
|
|
|
|
pd := (*pollDesc)(add(mem, i*pdSize))
|
|
|
|
pd.link = c.first
|
|
|
|
c.first = pd
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pd := c.first
|
|
|
|
c.first = pd.link
|
|
|
|
unlock(&c.lock)
|
|
|
|
return pd
|
|
|
|
}
|