
runtime: convert netpoll to Go

The common code is converted, as are the epoll and kqueue implementations.
Windows and Solaris are still C.

LGTM=rsc
R=golang-codereviews, rsc, dave
CC=golang-codereviews, iant, khr, rsc
https://golang.org/cl/132910043
Dmitriy Vyukov, 2014-09-04 10:04:04 +04:00
parent dae8038639, commit 91a670d179
31 changed files with 757 additions and 773 deletions


@@ -405,6 +405,9 @@ func (w *Walker) parseFile(dir, file string) (*ast.File, error) {
" sudog struct{};" +
" waitq struct{};" +
" wincallbackcontext struct{};" +
" keventt struct{};" +
" timespec struct{};" +
" epollevent struct{};" +
"); " +
"const (" +
" cb_max = 2000;" +


@@ -598,6 +598,9 @@ TEXT runtime·xchgp(SB), NOSPLIT, $0-12
MOVL AX, ret+8(FP)
RET
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-12
JMP runtime·xchg(SB)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:


@@ -709,6 +709,9 @@ TEXT runtime·xchgp(SB), NOSPLIT, $0-24
MOVQ AX, ret+16(FP)
RET
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-24
JMP runtime·xchg64(SB)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:


@@ -666,6 +666,16 @@ TEXT runtime·xchg64(SB), NOSPLIT, $0-24
MOVQ AX, ret+16(FP)
RET
TEXT runtime·xchgp(SB), NOSPLIT, $0-12
MOVL ptr+0(FP), BX
MOVL new+4(FP), AX
XCHGL AX, 0(BX)
MOVL AX, ret+8(FP)
RET
TEXT runtime·xchguintptr(SB), NOSPLIT, $0-12
JMP runtime·xchg(SB)
TEXT runtime·procyield(SB),NOSPLIT,$0-0
MOVL cycles+0(FP), AX
again:


@@ -54,6 +54,13 @@ runtime·xchgp(void* volatile* addr, void* v)
}
}
#pragma textflag NOSPLIT
void*
runtime·xchguintptr(void* volatile* addr, void* v)
{
return runtime·xchg((uint32*)addr, (uint32)v);
}
#pragma textflag NOSPLIT
void
runtime·procyield(uint32 cnt)

src/pkg/runtime/netpoll.go (new file, 448 lines)

@@ -0,0 +1,448 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
package runtime
import "unsafe"
// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
// func netpollinit() // to initialize the poller
// func netpollopen(fd uintptr, pd *pollDesc) int32 // to arm edge-triggered notifications
// and associate fd with pd.
// An implementation must call the following function to denote that the pd is ready.
// func netpollready(gpp **g, pd *pollDesc, mode int32)
// pollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
// goroutines respectively. The semaphore can be in the following states:
// pdReady - io readiness notification is pending;
// a goroutine consumes the notification by changing the state to nil.
// pdWait - a goroutine prepares to park on the semaphore, but not yet parked;
// the goroutine commits to park by changing the state to G pointer,
// or, alternatively, concurrent io notification changes the state to pdReady,
// or, alternatively, concurrent timeout/close changes the state to nil.
// G pointer - the goroutine is blocked on the semaphore;
// io notification or timeout/close changes the state to pdReady or nil respectively
// and unparks the goroutine.
// nil - nothing of the above.
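// (A real *g is a word-aligned pointer and thus always greater than pdWait, so the two sentinels, a g pointer, and nil never collide.)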
const (
pdReady uintptr = 1
pdWait uintptr = 2
)
const pollBlockSize = 4 * 1024
// Network poller descriptor.
type pollDesc struct {
link *pollDesc // in pollcache, protected by pollcache.lock
// The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
// This fully covers seq, rt and wt variables. fd is constant throughout the pollDesc lifetime.
// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
// in a lock-free way by all operations.
// NOTE(dvyukov): the following code uses uintptr to store *g (rg/wg),
// that will blow up when GC starts moving objects.
lock mutex // protects the following fields
fd uintptr
closing bool
seq uintptr // protects from stale timers and ready notifications
rg uintptr // pdReady, pdWait, G waiting for read or nil
rt timer // read deadline timer (set if rt.f != nil)
rd int64 // read deadline
wg uintptr // pdReady, pdWait, G waiting for write or nil
wt timer // write deadline timer
wd int64 // write deadline
user unsafe.Pointer // user settable cookie
}
type pollCache struct {
lock mutex
first *pollDesc
// pollDesc objects must be type-stable,
// because we can get ready notifications from epoll/kqueue
// after the descriptor is closed/reused.
// Stale notifications are detected using the seq variable,
// which is incremented when deadlines are changed or the descriptor is reused.
}
var pollcache pollCache
func netpollServerInit() {
netpollinit()
}
func netpollOpen(fd uintptr) (*pollDesc, int) {
pd := pollcache.alloc()
lock(&pd.lock)
if pd.wg != 0 && pd.wg != pdReady {
gothrow("netpollOpen: blocked write on free descriptor")
}
if pd.rg != 0 && pd.rg != pdReady {
gothrow("netpollOpen: blocked read on free descriptor")
}
pd.fd = fd
pd.closing = false
pd.seq++
pd.rg = 0
pd.rd = 0
pd.wg = 0
pd.wd = 0
unlock(&pd.lock)
errno := netpollopen(fd, pd)
return pd, int(errno)
}
func netpollClose(pd *pollDesc) {
if !pd.closing {
gothrow("runtime_pollClose: close w/o unblock")
}
if pd.wg != 0 && pd.wg != pdReady {
gothrow("netpollClose: blocked write on closing descriptor")
}
if pd.rg != 0 && pd.rg != pdReady {
gothrow("netpollClose: blocked read on closing descriptor")
}
netpollclose(uintptr(pd.fd))
pollcache.free(pd)
}
func (c *pollCache) free(pd *pollDesc) {
lock(&c.lock)
pd.link = c.first
c.first = pd
unlock(&c.lock)
}
func netpollReset(pd *pollDesc, mode int) int {
err := netpollcheckerr(pd, int32(mode))
if err != 0 {
return err
}
if mode == 'r' {
pd.rg = 0
} else if mode == 'w' {
pd.wg = 0
}
return 0
}
func netpollWait(pd *pollDesc, mode int) int {
err := netpollcheckerr(pd, int32(mode))
if err != 0 {
return err
}
// For now, only Solaris uses level-triggered IO.
if GOOS == "solaris" {
netpollarm(pd, mode)
}
for !netpollblock(pd, int32(mode), false) {
err = netpollcheckerr(pd, int32(mode))
if err != 0 {
return err
}
// Can happen if timeout has fired and unblocked us,
// but before we had a chance to run, timeout has been reset.
// Pretend it has not happened and retry.
}
return 0
}
func netpollWaitCanceled(pd *pollDesc, mode int) {
// This function is used only on windows after a failed attempt to cancel
// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
for !netpollblock(pd, int32(mode), true) {
}
}
func netpollSetDeadline(pd *pollDesc, d int64, mode int) {
lock(&pd.lock)
if pd.closing {
unlock(&pd.lock)
return
}
pd.seq++ // invalidate current timers
// Reset current timers.
if pd.rt.f != nil {
deltimer(&pd.rt)
pd.rt.f = nil
}
if pd.wt.f != nil {
deltimer(&pd.wt)
pd.wt.f = nil
}
// Setup new timers.
if d != 0 && d <= nanotime() {
d = -1
}
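// mode selects which deadline(s) to update: 'r', 'w', or 'r'+'w' for both.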
if mode == 'r' || mode == 'r'+'w' {
pd.rd = d
}
if mode == 'w' || mode == 'r'+'w' {
pd.wd = d
}
if pd.rd > 0 && pd.rd == pd.wd {
pd.rt.f = netpollDeadline
pd.rt.when = pd.rd
// Copy current seq into the timer arg.
// Timer func will check the seq against current descriptor seq,
// if they differ the descriptor was reused or timers were reset.
pd.rt.arg = pd
pd.rt.seq = pd.seq
addtimer(&pd.rt)
} else {
if pd.rd > 0 {
pd.rt.f = netpollReadDeadline
pd.rt.when = pd.rd
pd.rt.arg = pd
pd.rt.seq = pd.seq
addtimer(&pd.rt)
}
if pd.wd > 0 {
pd.wt.f = netpollWriteDeadline
pd.wt.when = pd.wd
pd.wt.arg = pd
pd.wt.seq = pd.seq
addtimer(&pd.wt)
}
}
// If we set the new deadline in the past, unblock currently pending IO if any.
var rg, wg *g
atomicstorep(unsafe.Pointer(&wg), nil) // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
if pd.rd < 0 {
rg = netpollunblock(pd, 'r', false)
}
if pd.wd < 0 {
wg = netpollunblock(pd, 'w', false)
}
unlock(&pd.lock)
if rg != nil {
goready(rg)
}
if wg != nil {
goready(wg)
}
}
func netpollUnblock(pd *pollDesc) {
lock(&pd.lock)
if pd.closing {
gothrow("netpollUnblock: already closing")
}
pd.closing = true
pd.seq++
var rg, wg *g
atomicstorep(unsafe.Pointer(&rg), nil) // full memory barrier between store to closing and read of rg/wg in netpollunblock
rg = netpollunblock(pd, 'r', false)
wg = netpollunblock(pd, 'w', false)
if pd.rt.f != nil {
deltimer(&pd.rt)
pd.rt.f = nil
}
if pd.wt.f != nil {
deltimer(&pd.wt)
pd.wt.f = nil
}
unlock(&pd.lock)
if rg != nil {
goready(rg)
}
if wg != nil {
goready(wg)
}
}
func netpollfd(pd *pollDesc) uintptr {
return pd.fd
}
func netpolluser(pd *pollDesc) *unsafe.Pointer {
return &pd.user
}
func netpollclosing(pd *pollDesc) bool {
return pd.closing
}
func netpolllock(pd *pollDesc) {
lock(&pd.lock)
}
func netpollunlock(pd *pollDesc) {
lock(&pd.lock)
}
// make pd ready; newly runnable goroutines (if any) are added to the gpp list
func netpollready(gpp **g, pd *pollDesc, mode int32) {
var rg, wg *g
if mode == 'r' || mode == 'r'+'w' {
rg = netpollunblock(pd, 'r', true)
}
if mode == 'w' || mode == 'r'+'w' {
wg = netpollunblock(pd, 'w', true)
}
if rg != nil {
rg.schedlink = *gpp
*gpp = rg
}
if wg != nil {
wg.schedlink = *gpp
*gpp = wg
}
}
func netpollcheckerr(pd *pollDesc, mode int32) int {
if pd.closing {
return 1 // errClosing
}
if (mode == 'r' && pd.rd < 0) || (mode == 'w' && pd.wd < 0) {
return 2 // errTimeout
}
return 0
}
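// netpollblockcommit is the gopark callback: publishing the g pointer commits the park, while returning false (the CAS lost to a concurrent notification) makes gopark abort the park and keep running.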
func netpollblockcommit(gp *g, gpp unsafe.Pointer) bool {
return casuintptr((*uintptr)(gpp), pdWait, uintptr(unsafe.Pointer(gp)))
}
// returns true if IO is ready, or false if it timed out or was closed
// waitio - wait only for completed IO, ignore errors
func netpollblock(pd *pollDesc, mode int32, waitio bool) bool {
gpp := &pd.rg
if mode == 'w' {
gpp = &pd.wg
}
// set the gpp semaphore to WAIT
for {
old := *gpp
if old == pdReady {
*gpp = 0
return true
}
if old != 0 {
gothrow("netpollblock: double wait")
}
if casuintptr(gpp, 0, pdWait) {
break
}
}
// need to recheck error states after setting gpp to WAIT
// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
if waitio || netpollcheckerr(pd, mode) == 0 {
f := netpollblockcommit
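// gopark takes the callback as a raw pointer; the double dereference below extracts the code pointer from the func value f.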
gopark(**(**unsafe.Pointer)(unsafe.Pointer(&f)), unsafe.Pointer(gpp), "IO wait")
}
// be careful to not lose concurrent READY notification
old := xchguintptr(gpp, 0)
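// Only pdReady or 0 are expected here; anything above pdWait would be a leftover *g pointer.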
if old > pdWait {
gothrow("netpollblock: corrupted state")
}
return old == pdReady
}
func netpollunblock(pd *pollDesc, mode int32, ioready bool) *g {
gpp := &pd.rg
if mode == 'w' {
gpp = &pd.wg
}
for {
old := *gpp
if old == pdReady {
return nil
}
if old == 0 && !ioready {
// Only set READY for ioready. runtime_pollWait
// will check for timeout/cancel before waiting.
return nil
}
var new uintptr
if ioready {
new = pdReady
}
if casuintptr(gpp, old, new) {
if old == pdReady || old == pdWait {
old = 0
}
return (*g)(unsafe.Pointer(old))
}
}
}
func netpolldeadlineimpl(pd *pollDesc, seq uintptr, read, write bool) {
lock(&pd.lock)
// Seq arg is seq when the timer was set.
// If it's stale, ignore the timer event.
if seq != pd.seq {
// The descriptor was reused or timers were reset.
unlock(&pd.lock)
return
}
var rg *g
if read {
if pd.rd <= 0 || pd.rt.f == nil {
gothrow("netpollDeadlineImpl: inconsistent read deadline")
}
pd.rd = -1
atomicstorep(unsafe.Pointer(&pd.rt.f), nil) // full memory barrier between store to rd and load of rg in netpollunblock
rg = netpollunblock(pd, 'r', false)
}
var wg *g
if write {
if pd.wd <= 0 || pd.wt.f == nil && !read {
gothrow("netpolldeadlineimpl: inconsistent write deadline")
}
pd.wd = -1
atomicstorep(unsafe.Pointer(&pd.wt.f), nil) // full memory barrier between store to wd and load of wg in netpollunblock
wg = netpollunblock(pd, 'w', false)
}
unlock(&pd.lock)
if rg != nil {
goready(rg)
}
if wg != nil {
goready(wg)
}
}
func netpollDeadline(arg interface{}, seq uintptr) {
netpolldeadlineimpl(arg.(*pollDesc), seq, true, true)
}
func netpollReadDeadline(arg interface{}, seq uintptr) {
netpolldeadlineimpl(arg.(*pollDesc), seq, true, false)
}
func netpollWriteDeadline(arg interface{}, seq uintptr) {
netpolldeadlineimpl(arg.(*pollDesc), seq, false, true)
}
func (c *pollCache) alloc() *pollDesc {
lock(&c.lock)
if c.first == nil {
const pdSize = unsafe.Sizeof(pollDesc{})
n := pollBlockSize / pdSize
if n == 0 {
n = 1
}
// Must be in non-GC memory because it can be referenced
// only from epoll/kqueue internals.
mem := persistentalloc(n*pdSize, 0, &memstats.other_sys)
for i := uintptr(0); i < n; i++ {
pd := (*pollDesc)(add(mem, i*pdSize))
pd.link = c.first
c.first = pd
}
}
pd := c.first
c.first = pd.link
unlock(&c.lock)
return pd
}


@@ -1,481 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
package net
#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
#include "arch_GOARCH.h"
#include "malloc.h"
// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
// void runtime·netpollinit(void); // to initialize the poller
// int32 runtime·netpollopen(uintptr fd, PollDesc *pd); // to arm edge-triggered notifications
// and associate fd with pd.
// An implementation must call the following function to denote that the pd is ready.
// void runtime·netpollready(G **gpp, PollDesc *pd, int32 mode);
// PollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
// goroutines respectively. The semaphore can be in the following states:
// READY - io readiness notification is pending;
// a goroutine consumes the notification by changing the state to nil.
// WAIT - a goroutine prepares to park on the semaphore, but not yet parked;
// the goroutine commits to park by changing the state to G pointer,
// or, alternatively, concurrent io notification changes the state to READY,
// or, alternatively, concurrent timeout/close changes the state to nil.
// G pointer - the goroutine is blocked on the semaphore;
// io notification or timeout/close changes the state to READY or nil respectively
// and unparks the goroutine.
// nil - nothing of the above.
#define READY ((G*)1)
#define WAIT ((G*)2)
enum
{
PollBlockSize = 4*1024,
};
// time.go defines the layout of this structure.
// Keep in sync with time.go.
typedef struct Timer Timer;
struct Timer
{
intgo i;
int64 when;
int64 period;
FuncVal *fv;
Eface arg;
};
void runtime·addtimer(Timer*);
void runtime·deltimer(Timer*);
struct PollDesc
{
PollDesc* link; // in pollcache, protected by pollcache.lock
// The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
// This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
// in a lock-free way by all operations.
Mutex lock; // protects the following fields
uintptr fd;
bool closing;
uintptr seq; // protects from stale timers and ready notifications
G* rg; // READY, WAIT, G waiting for read or nil
Timer rt; // read deadline timer (set if rt.fv != nil)
int64 rd; // read deadline
G* wg; // READY, WAIT, G waiting for write or nil
Timer wt; // write deadline timer
int64 wd; // write deadline
void* user; // user settable cookie
};
static struct
{
Mutex lock;
PollDesc* first;
// PollDesc objects must be type-stable,
// because we can get ready notifications from epoll/kqueue
// after the descriptor is closed/reused.
// Stale notifications are detected using the seq variable,
// which is incremented when deadlines are changed or the descriptor is reused.
} pollcache;
static bool netpollblock(PollDesc*, int32, bool);
static G* netpollunblock(PollDesc*, int32, bool);
static void deadline(Eface);
static void readDeadline(Eface);
static void writeDeadline(Eface);
static PollDesc* allocPollDesc(void);
static intgo checkerr(PollDesc *pd, int32 mode);
static FuncVal deadlineFn = {(void(*)(void))deadline};
static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
// runtimeNano returns the current value of the runtime clock in nanoseconds.
func runtimeNano() (ns int64) {
ns = runtime·nanotime();
}
func runtime_pollServerInit() {
runtime·netpollinit();
}
func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
pd = allocPollDesc();
runtime·lock(&pd->lock);
if(pd->wg != nil && pd->wg != READY)
runtime·throw("runtime_pollOpen: blocked write on free descriptor");
if(pd->rg != nil && pd->rg != READY)
runtime·throw("runtime_pollOpen: blocked read on free descriptor");
pd->fd = fd;
pd->closing = false;
pd->seq++;
pd->rg = nil;
pd->rd = 0;
pd->wg = nil;
pd->wd = 0;
runtime·unlock(&pd->lock);
errno = runtime·netpollopen(fd, pd);
}
func runtime_pollClose(pd *PollDesc) {
if(!pd->closing)
runtime·throw("runtime_pollClose: close w/o unblock");
if(pd->wg != nil && pd->wg != READY)
runtime·throw("runtime_pollClose: blocked write on closing descriptor");
if(pd->rg != nil && pd->rg != READY)
runtime·throw("runtime_pollClose: blocked read on closing descriptor");
runtime·netpollclose(pd->fd);
runtime·lock(&pollcache.lock);
pd->link = pollcache.first;
pollcache.first = pd;
runtime·unlock(&pollcache.lock);
}
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
err = checkerr(pd, mode);
if(err)
goto ret;
if(mode == 'r')
pd->rg = nil;
else if(mode == 'w')
pd->wg = nil;
ret:
}
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
err = checkerr(pd, mode);
if(err == 0) {
// For now, only Solaris uses level-triggered IO.
if(Solaris)
runtime·netpollarm(pd, mode);
while(!netpollblock(pd, mode, false)) {
err = checkerr(pd, mode);
if(err != 0)
break;
// Can happen if timeout has fired and unblocked us,
// but before we had a chance to run, timeout has been reset.
// Pretend it has not happened and retry.
}
}
}
func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
// This function is used only on windows after a failed attempt to cancel
// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
while(!netpollblock(pd, mode, true))
;
}
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
G *rg, *wg;
runtime·lock(&pd->lock);
if(pd->closing) {
runtime·unlock(&pd->lock);
return;
}
pd->seq++; // invalidate current timers
// Reset current timers.
if(pd->rt.fv) {
runtime·deltimer(&pd->rt);
pd->rt.fv = nil;
}
if(pd->wt.fv) {
runtime·deltimer(&pd->wt);
pd->wt.fv = nil;
}
// Setup new timers.
if(d != 0 && d <= runtime·nanotime())
d = -1;
if(mode == 'r' || mode == 'r'+'w')
pd->rd = d;
if(mode == 'w' || mode == 'r'+'w')
pd->wd = d;
if(pd->rd > 0 && pd->rd == pd->wd) {
pd->rt.fv = &deadlineFn;
pd->rt.when = pd->rd;
// Copy current seq into the timer arg.
// Timer func will check the seq against current descriptor seq,
// if they differ the descriptor was reused or timers were reset.
pd->rt.arg.type = (Type*)pd->seq;
pd->rt.arg.data = pd;
runtime·addtimer(&pd->rt);
} else {
if(pd->rd > 0) {
pd->rt.fv = &readDeadlineFn;
pd->rt.when = pd->rd;
pd->rt.arg.type = (Type*)pd->seq;
pd->rt.arg.data = pd;
runtime·addtimer(&pd->rt);
}
if(pd->wd > 0) {
pd->wt.fv = &writeDeadlineFn;
pd->wt.when = pd->wd;
pd->wt.arg.type = (Type*)pd->seq;
pd->wt.arg.data = pd;
runtime·addtimer(&pd->wt);
}
}
// If we set the new deadline in the past, unblock currently pending IO if any.
rg = nil;
runtime·atomicstorep(&wg, nil); // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
if(pd->rd < 0)
rg = netpollunblock(pd, 'r', false);
if(pd->wd < 0)
wg = netpollunblock(pd, 'w', false);
runtime·unlock(&pd->lock);
if(rg)
runtime·ready(rg);
if(wg)
runtime·ready(wg);
}
func runtime_pollUnblock(pd *PollDesc) {
G *rg, *wg;
runtime·lock(&pd->lock);
if(pd->closing)
runtime·throw("runtime_pollUnblock: already closing");
pd->closing = true;
pd->seq++;
runtime·atomicstorep(&rg, nil); // full memory barrier between store to closing and read of rg/wg in netpollunblock
rg = netpollunblock(pd, 'r', false);
wg = netpollunblock(pd, 'w', false);
if(pd->rt.fv) {
runtime·deltimer(&pd->rt);
pd->rt.fv = nil;
}
if(pd->wt.fv) {
runtime·deltimer(&pd->wt);
pd->wt.fv = nil;
}
runtime·unlock(&pd->lock);
if(rg)
runtime·ready(rg);
if(wg)
runtime·ready(wg);
}
uintptr
runtime·netpollfd(PollDesc *pd)
{
return pd->fd;
}
void**
runtime·netpolluser(PollDesc *pd)
{
return &pd->user;
}
bool
runtime·netpollclosing(PollDesc *pd)
{
return pd->closing;
}
void
runtime·netpolllock(PollDesc *pd)
{
runtime·lock(&pd->lock);
}
void
runtime·netpollunlock(PollDesc *pd)
{
runtime·unlock(&pd->lock);
}
// make pd ready; newly runnable goroutines (if any) are enqueued into the gpp list
void
runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
{
G *rg, *wg;
rg = wg = nil;
if(mode == 'r' || mode == 'r'+'w')
rg = netpollunblock(pd, 'r', true);
if(mode == 'w' || mode == 'r'+'w')
wg = netpollunblock(pd, 'w', true);
if(rg) {
rg->schedlink = *gpp;
*gpp = rg;
}
if(wg) {
wg->schedlink = *gpp;
*gpp = wg;
}
}
static intgo
checkerr(PollDesc *pd, int32 mode)
{
if(pd->closing)
return 1; // errClosing
if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
return 2; // errTimeout
return 0;
}
static bool
blockcommit(G *gp, G **gpp)
{
return runtime·casp(gpp, WAIT, gp);
}
// returns true if IO is ready, or false if it timed out or was closed
// waitio - wait only for completed IO, ignore errors
static bool
netpollblock(PollDesc *pd, int32 mode, bool waitio)
{
G **gpp, *old;
gpp = &pd->rg;
if(mode == 'w')
gpp = &pd->wg;
// set the gpp semaphore to WAIT
for(;;) {
old = *gpp;
if(old == READY) {
*gpp = nil;
return true;
}
if(old != nil)
runtime·throw("netpollblock: double wait");
if(runtime·casp(gpp, nil, WAIT))
break;
}
// need to recheck error states after setting gpp to WAIT
// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
if(waitio || checkerr(pd, mode) == 0)
runtime·park((bool(*)(G*, void*))blockcommit, gpp, runtime·gostringnocopy((byte*)"IO wait"));
// be careful to not lose concurrent READY notification
old = runtime·xchgp(gpp, nil);
if(old > WAIT)
runtime·throw("netpollblock: corrupted state");
return old == READY;
}
static G*
netpollunblock(PollDesc *pd, int32 mode, bool ioready)
{
G **gpp, *old, *new;
gpp = &pd->rg;
if(mode == 'w')
gpp = &pd->wg;
for(;;) {
old = *gpp;
if(old == READY)
return nil;
if(old == nil && !ioready) {
// Only set READY for ioready. runtime_pollWait
// will check for timeout/cancel before waiting.
return nil;
}
new = nil;
if(ioready)
new = READY;
if(runtime·casp(gpp, old, new))
break;
}
if(old > WAIT)
return old; // must be G*
return nil;
}
static void
deadlineimpl(Eface arg, bool read, bool write)
{
PollDesc *pd;
uint32 seq;
G *rg, *wg;
pd = (PollDesc*)arg.data;
// This is the seq when the timer was set.
// If it's stale, ignore the timer event.
seq = (uintptr)arg.type;
rg = wg = nil;
runtime·lock(&pd->lock);
if(seq != pd->seq) {
// The descriptor was reused or timers were reset.
runtime·unlock(&pd->lock);
return;
}
if(read) {
if(pd->rd <= 0 || pd->rt.fv == nil)
runtime·throw("deadlineimpl: inconsistent read deadline");
pd->rd = -1;
runtime·atomicstorep(&pd->rt.fv, nil); // full memory barrier between store to rd and load of rg in netpollunblock
rg = netpollunblock(pd, 'r', false);
}
if(write) {
if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
runtime·throw("deadlineimpl: inconsistent write deadline");
pd->wd = -1;
runtime·atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
wg = netpollunblock(pd, 'w', false);
}
runtime·unlock(&pd->lock);
if(rg)
runtime·ready(rg);
if(wg)
runtime·ready(wg);
}
static void
deadline(Eface arg)
{
deadlineimpl(arg, true, true);
}
static void
readDeadline(Eface arg)
{
deadlineimpl(arg, true, false);
}
static void
writeDeadline(Eface arg)
{
deadlineimpl(arg, false, true);
}
static PollDesc*
allocPollDesc(void)
{
PollDesc *pd;
uint32 i, n;
runtime·lock(&pollcache.lock);
if(pollcache.first == nil) {
n = PollBlockSize/sizeof(*pd);
if(n == 0)
n = 1;
// Must be in non-GC memory because it can be referenced
// only from epoll/kqueue internals.
pd = runtime·persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
for(i = 0; i < n; i++) {
pd[i].link = pollcache.first;
pollcache.first = &pd[i];
}
}
pd = pollcache.first;
pollcache.first = pd->link;
runtime·unlock(&pollcache.lock);
return pd;
}


@@ -1,103 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
int32 runtime·epollcreate(int32 size);
int32 runtime·epollcreate1(int32 flags);
int32 runtime·epollctl(int32 epfd, int32 op, int32 fd, EpollEvent *ev);
int32 runtime·epollwait(int32 epfd, EpollEvent *ev, int32 nev, int32 timeout);
void runtime·closeonexec(int32 fd);
static int32 epfd = -1; // epoll descriptor
void
runtime·netpollinit(void)
{
epfd = runtime·epollcreate1(EPOLL_CLOEXEC);
if(epfd >= 0)
return;
epfd = runtime·epollcreate(1024);
if(epfd >= 0) {
runtime·closeonexec(epfd);
return;
}
runtime·printf("netpollinit: failed to create descriptor (%d)\n", -epfd);
runtime·throw("netpollinit: failed to create descriptor");
}
int32
runtime·netpollopen(uintptr fd, PollDesc *pd)
{
EpollEvent ev;
int32 res;
ev.events = EPOLLIN|EPOLLOUT|EPOLLRDHUP|EPOLLET;
*(uint64*)ev.data = (uint64)(uintptr)pd;
res = runtime·epollctl(epfd, EPOLL_CTL_ADD, (int32)fd, &ev);
return -res;
}
int32
runtime·netpollclose(uintptr fd)
{
EpollEvent ev;
int32 res;
res = runtime·epollctl(epfd, EPOLL_CTL_DEL, (int32)fd, &ev);
return -res;
}
void
runtime·netpollarm(PollDesc* pd, int32 mode)
{
USED(pd);
USED(mode);
runtime·throw("unused");
}
// polls for ready network connections
// returns list of goroutines that become runnable
G*
runtime·netpoll(bool block)
{
static int32 lasterr;
EpollEvent events[128], *ev;
int32 n, i, waitms, mode;
G *gp;
if(epfd == -1)
return nil;
waitms = -1;
if(!block)
waitms = 0;
retry:
n = runtime·epollwait(epfd, events, nelem(events), waitms);
if(n < 0) {
if(n != -EINTR && n != lasterr) {
lasterr = n;
runtime·printf("runtime: epollwait on fd %d failed with %d\n", epfd, -n);
}
goto retry;
}
gp = nil;
for(i = 0; i < n; i++) {
ev = &events[i];
if(ev->events == 0)
continue;
mode = 0;
if(ev->events & (EPOLLIN|EPOLLRDHUP|EPOLLHUP|EPOLLERR))
mode += 'r';
if(ev->events & (EPOLLOUT|EPOLLHUP|EPOLLERR))
mode += 'w';
if(mode)
runtime·netpollready(&gp, (void*)(uintptr)*(uint64*)ev->data, mode);
}
if(block && gp == nil)
goto retry;
return gp;
}


@@ -0,0 +1,97 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux
package runtime
import "unsafe"
func epollcreate(size int32) int32
func epollcreate1(flags int32) int32
//go:noescape
func epollctl(epfd, op, fd int32, ev *epollevent) int32
//go:noescape
func epollwait(epfd int32, ev *epollevent, nev, timeout int32) int32
func closeonexec(fd int32)
var (
epfd int32 = -1 // epoll descriptor
netpolllasterr int32
)
func netpollinit() {
epfd = epollcreate1(_EPOLL_CLOEXEC)
if epfd >= 0 {
return
}
epfd = epollcreate(1024)
if epfd >= 0 {
closeonexec(epfd)
return
}
println("netpollinit: failed to create epoll descriptor", -epfd)
gothrow("netpollinit: failed to create descriptor")
}
func netpollopen(fd uintptr, pd *pollDesc) int32 {
var ev epollevent
ev.events = _EPOLLIN | _EPOLLOUT | _EPOLLRDHUP | _EPOLLET
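// Store the *pollDesc in the event's user-data word so netpoll can recover it when the fd becomes ready.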
*(**pollDesc)(unsafe.Pointer(&ev.data)) = pd
return -epollctl(epfd, _EPOLL_CTL_ADD, int32(fd), &ev)
}
func netpollclose(fd uintptr) int32 {
var ev epollevent
return -epollctl(epfd, _EPOLL_CTL_DEL, int32(fd), &ev)
}
func netpollarm(pd *pollDesc, mode int) {
gothrow("unused")
}
// polls for ready network connections
// returns list of goroutines that become runnable
func netpoll(block bool) (gp *g) {
if epfd == -1 {
return
}
waitms := int32(-1)
if !block {
waitms = 0
}
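// waitms of -1 makes epollwait block until an event arrives; 0 polls and returns immediately.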
var events [128]epollevent
retry:
n := epollwait(epfd, &events[0], int32(len(events)), waitms)
if n < 0 {
if n != -_EINTR && n != netpolllasterr {
netpolllasterr = n
println("runtime: epollwait on fd", epfd, "failed with", -n)
}
goto retry
}
for i := int32(0); i < n; i++ {
ev := &events[i]
if ev.events == 0 {
continue
}
var mode int32
if ev.events&(_EPOLLIN|_EPOLLRDHUP|_EPOLLHUP|_EPOLLERR) != 0 {
mode += 'r'
}
if ev.events&(_EPOLLOUT|_EPOLLHUP|_EPOLLERR) != 0 {
mode += 'w'
}
if mode != 0 {
pd := *(**pollDesc)(unsafe.Pointer(&ev.data))
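// noescape hides &gp from escape analysis; netpollready links each ready goroutine onto the gp list via schedlink.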
netpollready((**g)(noescape(unsafe.Pointer(&gp))), pd, mode)
}
}
if block && gp == nil {
goto retry
}
return gp
}


@@ -1,111 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd netbsd openbsd
#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
#include "os_GOOS.h"
// Integrated network poller (kqueue-based implementation).
int32 runtime·kqueue(void);
int32 runtime·kevent(int32, KeventT*, int32, KeventT*, int32, Timespec*);
void runtime·closeonexec(int32);
static int32 kq = -1;
void
runtime·netpollinit(void)
{
kq = runtime·kqueue();
if(kq < 0) {
runtime·printf("netpollinit: kqueue failed with %d\n", -kq);
runtime·throw("netpollinit: kqueue failed");
}
runtime·closeonexec(kq);
}
int32
runtime·netpollopen(uintptr fd, PollDesc *pd)
{
KeventT ev[2];
int32 n;
// Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR)
// for the whole fd lifetime. The notifications are automatically unregistered
// when fd is closed.
ev[0].ident = (uint32)fd;
ev[0].filter = EVFILT_READ;
ev[0].flags = EV_ADD|EV_CLEAR;
ev[0].fflags = 0;
ev[0].data = 0;
ev[0].udata = (kevent_udata)pd;
ev[1] = ev[0];
ev[1].filter = EVFILT_WRITE;
n = runtime·kevent(kq, ev, 2, nil, 0, nil);
if(n < 0)
return -n;
return 0;
}
int32
runtime·netpollclose(uintptr fd)
{
// Don't need to unregister because calling close()
// on fd will remove any kevents that reference the descriptor.
USED(fd);
return 0;
}
void
runtime·netpollarm(PollDesc* pd, int32 mode)
{
USED(pd, mode);
runtime·throw("unused");
}
// Polls for ready network connections.
// Returns list of goroutines that become runnable.
G*
runtime·netpoll(bool block)
{
static int32 lasterr;
KeventT events[64], *ev;
Timespec ts, *tp;
int32 n, i, mode;
G *gp;
if(kq == -1)
return nil;
tp = nil;
if(!block) {
ts.tv_sec = 0;
ts.tv_nsec = 0;
tp = &ts;
}
gp = nil;
retry:
n = runtime·kevent(kq, nil, 0, events, nelem(events), tp);
if(n < 0) {
if(n != -EINTR && n != lasterr) {
lasterr = n;
runtime·printf("runtime: kevent on fd %d failed with %d\n", kq, -n);
}
goto retry;
}
for(i = 0; i < n; i++) {
ev = &events[i];
mode = 0;
if(ev->filter == EVFILT_READ)
mode += 'r';
if(ev->filter == EVFILT_WRITE)
mode += 'w';
if(mode)
runtime·netpollready(&gp, (PollDesc*)ev->udata, mode);
}
if(block && gp == nil)
goto retry;
return gp;
}


@@ -0,0 +1,101 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin dragonfly freebsd netbsd openbsd
package runtime
// Integrated network poller (kqueue-based implementation).
import "unsafe"
func kqueue() int32
//go:noescape
func kevent(kq int32, ch *keventt, nch int32, ev *keventt, nev int32, ts *timespec) int32
func closeonexec(fd int32)
var (
kq int32 = -1
netpolllasterr int32
)
func netpollinit() {
kq = kqueue()
if kq < 0 {
println("netpollinit: kqueue failed with", -kq)
gothrow("netpollinit: kqueue failed")
}
closeonexec(kq)
}
func netpollopen(fd uintptr, pd *pollDesc) int32 {
// Arm both EVFILT_READ and EVFILT_WRITE in edge-triggered mode (EV_CLEAR)
// for the whole fd lifetime. The notifications are automatically unregistered
// when fd is closed.
var ev [2]keventt
*(*uintptr)(unsafe.Pointer(&ev[0].ident)) = fd
ev[0].filter = _EVFILT_READ
ev[0].flags = _EV_ADD | _EV_CLEAR
ev[0].fflags = 0
ev[0].data = 0
ev[0].udata = (*byte)(unsafe.Pointer(pd))
ev[1] = ev[0]
ev[1].filter = _EVFILT_WRITE
n := kevent(kq, &ev[0], 2, nil, 0, nil)
if n < 0 {
return -n
}
return 0
}
func netpollclose(fd uintptr) int32 {
// Don't need to unregister because calling close()
// on fd will remove any kevents that reference the descriptor.
return 0
}
func netpollarm(pd *pollDesc, mode int) {
gothrow("unused")
}
// Polls for ready network connections.
// Returns list of goroutines that become runnable.
func netpoll(block bool) (gp *g) {
if kq == -1 {
return
}
var tp *timespec
var ts timespec
if !block {
tp = &ts
}
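// A nil tp makes kevent block indefinitely; pointing tp at the zeroed ts turns the call into a non-blocking poll.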
var events [64]keventt
retry:
n := kevent(kq, nil, 0, &events[0], int32(len(events)), tp)
if n < 0 {
if n != -_EINTR && n != netpolllasterr {
netpolllasterr = n
println("runtime: kevent on fd", kq, "failed with", -n)
}
goto retry
}
for i := 0; i < int(n); i++ {
ev := &events[i]
var mode int32
if ev.filter == _EVFILT_READ {
mode += 'r'
}
if ev.filter == _EVFILT_WRITE {
mode += 'w'
}
if mode != 0 {
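// ev.udata carries the *pollDesc that netpollopen registered for this fd.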
netpollready((**g)(noescape(unsafe.Pointer(&gp))), (*pollDesc)(unsafe.Pointer(ev.udata)), mode)
}
}
if block && gp == nil {
goto retry
}
return gp
}


@@ -1,37 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
#include "os_GOOS.h"
// Fake network poller for NaCl.
// Should never be used, because NaCl network connections do not honor "SetNonblock".
void
runtime·netpollinit(void)
{
}
int32
runtime·netpollopen(uintptr fd, PollDesc *pd)
{
USED(fd);
USED(pd);
return 0;
}
int32
runtime·netpollclose(uintptr fd)
{
USED(fd);
return 0;
}
G*
runtime·netpoll(bool block)
{
USED(block);
return nil;
}


@@ -0,0 +1,26 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Fake network poller for NaCl.
// Should never be used, because NaCl network connections do not honor "SetNonblock".
package runtime
func netpollinit() {
}
func netpollopen(fd uintptr, pd *pollDesc) int32 {
return 0
}
func netpollclose(fd uintptr) int32 {
return 0
}
func netpollarm(pd *pollDesc, mode int) {
}
func netpoll(block bool) *g {
return nil
}


@@ -18,9 +18,6 @@ func sigaction(mode uint32, new, old unsafe.Pointer)
func sigaltstack(new, old unsafe.Pointer)
func sigtramp()
func setitimer(mode int32, new, old unsafe.Pointer)
func kqueue() int32
func kevent(fd int32, ev1 unsafe.Pointer, nev1 int32, ev2 unsafe.Pointer, nev2 int32, ts unsafe.Pointer) int32
func closeonexec(fd int32)
func mach_semaphore_wait(sema uint32) int32
func mach_semaphore_timedwait(sema, sec, nsec uint32) int32
func mach_semaphore_signal(sema uint32) int32


@@ -14,9 +14,6 @@ func setitimer(mode int32, new, old unsafe.Pointer)
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
func getrlimit(kind int32, limit unsafe.Pointer) int32
func raise(sig int32)
func kqueue() int32
func kevent(fd int32, ev1 unsafe.Pointer, nev1 int32, ev2 unsafe.Pointer, nev2 int32, ts unsafe.Pointer) int32
func closeonexec(fd int32)
func sys_umtx_sleep(addr unsafe.Pointer, val, timeout int32) int32
func sys_umtx_wakeup(addr unsafe.Pointer, val int32) int32


@@ -14,9 +14,6 @@ func setitimer(mode int32, new, old unsafe.Pointer)
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
func getrlimit(kind int32, limit unsafe.Pointer) int32
func raise(sig int32)
func kqueue() int32
func kevent(fd int32, ev1 unsafe.Pointer, nev1 int32, ev2 unsafe.Pointer, nev2 int32, ts unsafe.Pointer) int32
func closeonexec(fd int32)
func sys_umtx_op(addr unsafe.Pointer, mode int32, val uint32, ptr2, ts unsafe.Pointer) int32
const stackSystem = 0


@@ -14,11 +14,6 @@ func setitimer(mode int32, new, old unsafe.Pointer)
func rtsigprocmask(sig int32, new, old unsafe.Pointer, size int32)
func getrlimit(kind int32, limit unsafe.Pointer) int32
func raise(sig int32)
func epollcreate(size int32) int32
func epollcreate1(flags int32) int32
func epollctl(epfd, op, fd int32, ev unsafe.Pointer) int32
func epollwait(epfd int32, ev unsafe.Pointer, nev, timeout int32) int32
func closeonexec(fd int32)
func sched_getaffinity(pid, len uintptr, buf *uintptr) int32
const stackSystem = 0


@@ -13,9 +13,6 @@ func sigprocmask(mode int32, new, old unsafe.Pointer)
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
func lwp_tramp()
func raise(sig int32)
func kqueue() int32
func kevent(fd int32, ev1 unsafe.Pointer, nev1 int32, ev2 unsafe.Pointer, nev2 int32, ts unsafe.Pointer) int32
func closeonexec(fd int32)
func getcontext(ctxt unsafe.Pointer)
func lwp_create(ctxt unsafe.Pointer, flags uintptr, lwpid unsafe.Pointer) int32
func lwp_park(abstime unsafe.Pointer, unpark int32, hint, unparkhint unsafe.Pointer) int32


@@ -12,9 +12,6 @@ func sigaltstack(new, old unsafe.Pointer)
func sigprocmask(mode int32, new uint32) uint32
func sysctl(mib *uint32, miblen uint32, out *byte, size *uintptr, dst *byte, ndst uintptr) int32
func raise(sig int32)
func kqueue() int32
func kevent(fd int32, ev1 unsafe.Pointer, nev1 int32, ev2 unsafe.Pointer, nev2 int32, ts unsafe.Pointer) int32
func closeonexec(fd int32)
func tfork(param unsafe.Pointer, psize uintptr, mm, gg, fn unsafe.Pointer) int32
func thrsleep(ident unsafe.Pointer, clock_id int32, tsp, lock, abort unsafe.Pointer) int32
func thrwakeup(ident unsafe.Pointer, n int32) int32


@@ -19,6 +19,10 @@ func tstart_sysvicall(mm unsafe.Pointer) uint32
func nanotime1() int64
func usleep1(usec uint32)
func osyield1()
func netpollinit()
func netpollopen(fd uintptr, pd *pollDesc) int32
func netpollclose(fd uintptr) int32
func netpollarm(pd *pollDesc, mode int)
type libcFunc byte


@@ -21,6 +21,10 @@ func asmstdcall(fn unsafe.Pointer)
func getlasterror() uint32
func setlasterror(err uint32)
func usleep1(usec uint32)
func netpollinit()
func netpollopen(fd uintptr, pd *pollDesc) int32
func netpollclose(fd uintptr) int32
func netpollarm(pd *pollDesc, mode int)
const stackSystem = 512 * ptrSize


@@ -886,12 +886,8 @@ int64 runtime·cputicks(void);
int64 runtime·tickspersecond(void);
void runtime·blockevent(int64, intgo);
G* runtime·netpoll(bool);
void runtime·netpollinit(void);
int32 runtime·netpollopen(uintptr, PollDesc*);
int32 runtime·netpollclose(uintptr);
void runtime·netpollready(G**, PollDesc*, int32);
uintptr runtime·netpollfd(PollDesc*);
void runtime·netpollarm(PollDesc*, int32);
void** runtime·netpolluser(PollDesc*);
bool runtime·netpollclosing(PollDesc*);
void runtime·netpolllock(PollDesc*);


@@ -237,6 +237,9 @@ func xchg64(ptr *uint64, new uint64) uint64
//go:noescape
func xchgp(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer
//go:noescape
func xchguintptr(ptr *uintptr, new uintptr) uintptr
//go:noescape
func atomicstore(ptr *uint32, val uint32)


@@ -457,7 +457,7 @@ TEXT runtime·epollcreate1(SB),NOSPLIT,$0
MOVL AX, ret+4(FP)
RET
// int32 runtime·epollctl(int32 epfd, int32 op, int32 fd, EpollEvent *ev);
// func epollctl(epfd, op, fd int32, ev *epollevent) int32
TEXT runtime·epollctl(SB),NOSPLIT,$0
MOVL $255, AX
MOVL epfd+0(FP), BX


@@ -378,7 +378,7 @@ TEXT runtime·epollcreate1(SB),NOSPLIT,$0
MOVL AX, ret+8(FP)
RET
// int32 runtime·epollctl(int32 epfd, int32 op, int32 fd, EpollEvent *ev);
// func epollctl(epfd, op, fd int32, ev *epollevent) int32
TEXT runtime·epollctl(SB),NOSPLIT,$0
MOVL epfd+0(FP), DI
MOVL op+4(FP), SI


@@ -424,12 +424,12 @@ TEXT runtime·epollcreate1(SB),NOSPLIT,$0
MOVW R0, ret+4(FP)
RET
// int32 runtime·epollctl(int32 epfd, int32 op, int32 fd, EpollEvent *ev)
// func epollctl(epfd, op, fd int32, ev *epollevent) int32
TEXT runtime·epollctl(SB),NOSPLIT,$0
MOVW 0(FP), R0
MOVW 4(FP), R1
MOVW 8(FP), R2
MOVW 12(FP), R3
MOVW epfd+0(FP), R0
MOVW op+4(FP), R1
MOVW fd+8(FP), R2
MOVW ev+12(FP), R3
MOVW $SYS_epoll_ctl, R7
SWI $0
MOVW R0, ret+16(FP)


@@ -11,6 +11,9 @@
#define JMP B
#endif
TEXT net·runtimeNano(SB),NOSPLIT,$0-0
JMP runtime·nanotime(SB)
TEXT time·runtimeNano(SB),NOSPLIT,$0-0
JMP runtime·nanotime(SB)
@@ -74,6 +77,30 @@ TEXT reflect·chanrecv(SB), NOSPLIT, $0-0
TEXT runtimedebug·freeOSMemory(SB), NOSPLIT, $0-0
JMP runtime·freeOSMemory(SB)
TEXT net·runtime_pollServerInit(SB),NOSPLIT,$0-0
JMP runtime·netpollServerInit(SB)
TEXT net·runtime_pollOpen(SB),NOSPLIT,$0-0
JMP runtime·netpollOpen(SB)
TEXT net·runtime_pollClose(SB),NOSPLIT,$0-0
JMP runtime·netpollClose(SB)
TEXT net·runtime_pollReset(SB),NOSPLIT,$0-0
JMP runtime·netpollReset(SB)
TEXT net·runtime_pollWait(SB),NOSPLIT,$0-0
JMP runtime·netpollWait(SB)
TEXT net·runtime_pollWaitCanceled(SB),NOSPLIT,$0-0
JMP runtime·netpollWaitCanceled(SB)
TEXT net·runtime_pollSetDeadline(SB),NOSPLIT,$0-0
JMP runtime·netpollSetDeadline(SB)
TEXT net·runtime_pollUnblock(SB),NOSPLIT,$0-0
JMP runtime·netpollUnblock(SB)
TEXT syscall·setenv_c(SB), NOSPLIT, $0-0
JMP runtime·syscall_setenv_c(SB)


@@ -9,7 +9,7 @@ package runtime
import "unsafe"
// Package time knows the layout of this structure.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer and netpoll.goc:/timer.
// If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
// For GOOS=nacl, package syscall knows the layout of this structure.
// If this struct changes, adjust ../syscall/net_nacl.go:/runtimeTimer.
type timer struct {
@@ -20,8 +20,9 @@ type timer struct {
// a well-behaved function and not block.
when int64
period int64
f func(interface{})
f func(interface{}, uintptr)
arg interface{}
seq uintptr
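// seq is passed back to f; netpoll uses it to detect callbacks from stale (reset) timers.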
}
var timers struct {
@@ -74,7 +75,7 @@ func stopTimer(t *timer) bool {
// Go runtime.
// Ready the goroutine arg.
func goroutineReady(arg interface{}) {
func goroutineReady(arg interface{}, seq uintptr) {
goready(arg.(*g))
}
@@ -185,11 +186,12 @@ func timerproc() {
}
f := t.f
arg := t.arg
seq := t.seq
unlock(&timers.lock)
if raceenabled {
raceacquire(unsafe.Pointer(t))
}
f(arg)
f(arg, seq)
lock(&timers.lock)
}
if delta < 0 {


@@ -21,8 +21,9 @@ type runtimeTimer struct {
i int
when int64
period int64
f func(interface{}) // NOTE: must not be closure
f func(interface{}, uintptr) // NOTE: must not be closure
arg interface{}
seq uintptr
}
func startTimer(*runtimeTimer)
@@ -49,7 +50,7 @@ func (t *timer) stop() {
stopTimer(&t.r)
}
func timerExpired(i interface{}) {
func timerExpired(i interface{}, seq uintptr) {
t := i.(*timer)
go func() {
t.q.Lock()


@@ -12,7 +12,7 @@ func init() {
var Interrupt = interrupt
var DaysIn = daysIn
func empty(arg interface{}) {}
func empty(arg interface{}, seq uintptr) {}
// Test that a runtimeTimer with a duration so large it overflows
// does not cause other timers to hang.


@@ -17,8 +17,9 @@ type runtimeTimer struct {
i int
when int64
period int64
f func(interface{}) // NOTE: must not be closure
f func(interface{}, uintptr) // NOTE: must not be closure
arg interface{}
seq uintptr
}
// when is a helper function for setting the 'when' field of a runtimeTimer.
@@ -83,7 +84,7 @@ func (t *Timer) Reset(d Duration) bool {
return active
}
func sendTime(c interface{}) {
func sendTime(c interface{}, seq uintptr) {
// Non-blocking send of time on c.
// Used in NewTimer, it cannot block anyway (buffer).
// Used in NewTicker, dropping sends on the floor is
@@ -117,6 +118,6 @@ func AfterFunc(d Duration, f func()) *Timer {
return t
}
func goFunc(arg interface{}) {
func goFunc(arg interface{}, seq uintptr) {
go arg.(func())()
}