2013-03-14 00:38:37 -06:00
|
|
|
// Copyright 2013 The Go Authors. All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
all: merge NaCl branch (part 1)
See golang.org/s/go13nacl for design overview.
This CL is the mostly mechanical changes from rsc's Go 1.2 based NaCl branch, specifically 39cb35750369 to 500771b477cf from https://code.google.com/r/rsc-go13nacl. This CL does not include working NaCl support, there are probably two or three more large merges to come.
CL 15750044 is not included as it involves more invasive changes to the linker which will need to be merged separately.
The exact change lists included are
15050047: syscall: support for Native Client
15360044: syscall: unzip implementation for Native Client
15370044: syscall: Native Client SRPC implementation
15400047: cmd/dist, cmd/go, go/build, test: support for Native Client
15410048: runtime: support for Native Client
15410049: syscall: file descriptor table for Native Client
15410050: syscall: in-memory file system for Native Client
15440048: all: update +build lines for Native Client port
15540045: cmd/6g, cmd/8g, cmd/gc: support for Native Client
15570045: os: support for Native Client
15680044: crypto/..., hash/crc32, reflect, sync/atomic: support for amd64p32
15690044: net: support for Native Client
15690048: runtime: support for fake time like on Go Playground
15690051: build: disable various tests on Native Client
LGTM=rsc
R=rsc
CC=golang-codereviews
https://golang.org/cl/68150047
2014-02-25 07:47:42 -07:00
|
|
|
// +build darwin dragonfly freebsd linux nacl netbsd openbsd solaris windows
|
2013-03-14 00:59:55 -06:00
|
|
|
|
2013-03-14 00:38:37 -06:00
|
|
|
package net
|
|
|
|
|
|
|
|
#include "runtime.h"
|
|
|
|
#include "defs_GOOS_GOARCH.h"
|
|
|
|
#include "arch_GOARCH.h"
|
|
|
|
#include "malloc.h"
|
|
|
|
|
|
|
|
// Integrated network poller (platform-independent part).
|
|
|
|
// A particular implementation (epoll/kqueue) must define the following functions:
|
|
|
|
// void runtime·netpollinit(void); // to initialize the poller
|
2013-05-19 20:55:50 -06:00
|
|
|
// int32 runtime·netpollopen(uintptr fd, PollDesc *pd); // to arm edge-triggered notifications
|
2013-03-14 00:38:37 -06:00
|
|
|
// and associate fd with pd.
|
|
|
|
// An implementation must call the following function to denote that the pd is ready.
|
|
|
|
// void runtime·netpollready(G **gpp, PollDesc *pd, int32 mode);
|
|
|
|
|
2014-01-22 00:27:16 -07:00
|
|
|
// PollDesc contains 2 binary semaphores, rg and wg, to park reader and writer
|
|
|
|
// goroutines respectively. The semaphore can be in the following states:
|
|
|
|
// READY - io readiness notification is pending;
|
|
|
|
// a goroutine consumes the notification by changing the state to nil.
|
|
|
|
// WAIT - a goroutine prepares to park on the semaphore, but not yet parked;
|
|
|
|
// the goroutine commits to park by changing the state to G pointer,
|
|
|
|
// or, alternatively, concurrent io notification changes the state to READY,
|
|
|
|
// or, alternatively, concurrent timeout/close changes the state to nil.
|
|
|
|
// G pointer - the goroutine is blocked on the semaphore;
|
|
|
|
// io notification or timeout/close changes the state to READY or nil respectively
|
|
|
|
// and unparks the goroutine.
|
|
|
|
// nil - nothing of the above.
|
2013-03-14 00:38:37 -06:00
|
|
|
#define READY ((G*)1)
|
2014-01-22 00:27:16 -07:00
|
|
|
#define WAIT ((G*)2)
|
2013-03-14 00:38:37 -06:00
|
|
|
|
2014-01-30 02:28:19 -07:00
|
|
|
enum
{
	// Size in bytes of one batch of PollDescs carved out by allocPollDesc.
	PollBlockSize	= 4*1024,
};
|
|
|
|
|
2013-03-14 00:38:37 -06:00
|
|
|
// Network poller descriptor. One per polled file descriptor.
struct PollDesc
{
	PollDesc* link;	// in pollcache, protected by pollcache.Lock

	// The lock protects pollOpen, pollSetDeadline, pollUnblock and deadlineimpl operations.
	// This fully covers seq, rt and wt variables. fd is constant throughout the PollDesc lifetime.
	// pollReset, pollWait, pollWaitCanceled and runtime·netpollready (IO readiness notification)
	// proceed w/o taking the lock. So closing, rg, rd, wg and wd are manipulated
	// in a lock-free way by all operations.
	Lock;		// protects the following fields
	uintptr	fd;
	bool	closing;
	uintptr	seq;	// protects from stale timers and ready notifications
	G*	rg;	// READY, WAIT, G waiting for read or nil
	Timer	rt;	// read deadline timer (set if rt.fv != nil)
	int64	rd;	// read deadline
	G*	wg;	// READY, WAIT, G waiting for write or nil
	Timer	wt;	// write deadline timer
	int64	wd;	// write deadline
	void*	user;	// user settable cookie
};
|
|
|
|
|
|
|
|
// Global cache (free list) of PollDesc objects.
static struct
{
	Lock;
	PollDesc*	first;	// head of free list, protected by the Lock above
	// PollDesc objects must be type-stable,
	// because we can get ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using seq variable,
	// seq is incremented when deadlines are changed or descriptor is reused.
} pollcache;
|
|
|
|
|
2014-01-22 00:27:16 -07:00
|
|
|
// Forward declarations of the internal helpers defined below.
static bool netpollblock(PollDesc*, int32, bool);
static G* netpollunblock(PollDesc*, int32, bool);
static void deadline(int64, Eface);
static void readDeadline(int64, Eface);
static void writeDeadline(int64, Eface);
static PollDesc* allocPollDesc(void);
static intgo checkerr(PollDesc *pd, int32 mode);

// FuncVal wrappers so the deadline functions can be installed as timer
// callbacks (Timer.fv) in runtime_pollSetDeadline.
static FuncVal deadlineFn	= {(void(*)(void))deadline};
static FuncVal readDeadlineFn	= {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn	= {(void(*)(void))writeDeadline};
|
|
|
|
|
runtime: use monotonic clock for timers (linux/386, linux/amd64)
This lays the groundwork for making Go robust when the system's
calendar time jumps around. All input values to the runtimeTimer
struct now use the runtime clock as a common reference point.
This affects net.Conn.Set[Read|Write]Deadline(), time.Sleep(),
time.Timer, etc. Under normal conditions, behavior is unchanged.
Each platform and architecture's implementation of runtime·nanotime()
should be modified to use a monotonic system clock when possible.
Platforms/architectures modified and tested with monotonic clock:
linux/x86 - clock_gettime(CLOCK_MONOTONIC)
Update #6007
LGTM=dvyukov, rsc
R=golang-codereviews, dvyukov, alex.brainman, stephen.gutekanst, dave, rsc, mikioh.mikioh
CC=golang-codereviews
https://golang.org/cl/53010043
2014-02-24 08:57:46 -07:00
|
|
|
// runtimeNano returns the current value of the runtime clock in nanoseconds.
// Exposed to Go (package net/time) via the goc bridge.
func runtimeNano() (ns int64) {
	ns = runtime·nanotime();
}
|
|
|
|
|
2013-03-14 00:38:37 -06:00
|
|
|
// runtime_pollServerInit initializes the platform-specific poller
// (epoll/kqueue/...). Called once from Go before any descriptor is armed.
func runtime_pollServerInit() {
	runtime·netpollinit();
}
|
|
|
|
|
2013-05-19 20:55:50 -06:00
|
|
|
// runtime_pollOpen allocates a PollDesc for fd, resets its state and
// registers fd with the platform poller. Returns the descriptor and the
// errno from runtime·netpollopen (0 on success).
func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
	pd = allocPollDesc();
	runtime·lock(pd);
	// A descriptor coming off the free list must not have a parked
	// goroutine on either semaphore; anything other than nil/READY
	// indicates corruption.
	if(pd->wg != nil && pd->wg != READY)
		runtime·throw("runtime_pollOpen: blocked write on free descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime·throw("runtime_pollOpen: blocked read on free descriptor");
	pd->fd = fd;
	pd->closing = false;
	pd->seq++;	// invalidate any stale timers/notifications from a previous user of this PollDesc
	pd->rg = nil;
	pd->rd = 0;
	pd->wg = nil;
	pd->wd = 0;
	runtime·unlock(pd);

	errno = runtime·netpollopen(fd, pd);
}
|
|
|
|
|
|
|
|
// runtime_pollClose unregisters the descriptor from the poller and returns
// the PollDesc to the free list. The caller must have already called
// runtime_pollUnblock (which sets closing and evicts waiters).
func runtime_pollClose(pd *PollDesc) {
	if(!pd->closing)
		runtime·throw("runtime_pollClose: close w/o unblock");
	if(pd->wg != nil && pd->wg != READY)
		runtime·throw("runtime_pollClose: blocked write on closing descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime·throw("runtime_pollClose: blocked read on closing descriptor");
	runtime·netpollclose(pd->fd);
	// Push onto the pollcache free list for reuse (PollDescs are type-stable).
	runtime·lock(&pollcache);
	pd->link = pollcache.first;
	pollcache.first = pd;
	runtime·unlock(&pollcache);
}
|
|
|
|
|
|
|
|
// runtime_pollReset prepares the descriptor for a new read ('r') or write
// ('w') operation by clearing the corresponding semaphore. Returns non-zero
// (see checkerr) if the descriptor is closing or already timed out.
// Runs lock-free (see the comment on struct PollDesc).
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
	err = checkerr(pd, mode);
	if(err)
		goto ret;
	if(mode == 'r')
		pd->rg = nil;
	else if(mode == 'w')
		pd->wg = nil;
ret:
}
|
|
|
|
|
|
|
|
// runtime_pollWait blocks the calling goroutine until the descriptor is
// ready for mode ('r'/'w'), a deadline expires, or the descriptor is closed.
// Returns 0 on readiness, otherwise the checkerr error code.
// Runs lock-free (see the comment on struct PollDesc).
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
	err = checkerr(pd, mode);
	if(err == 0) {
		// As for now only Solaris uses level-triggered IO.
		if(Solaris)
			runtime·netpollarm(pd, mode);
		while(!netpollblock(pd, mode, false)) {
			err = checkerr(pd, mode);
			if(err != 0)
				break;
			// Can happen if timeout has fired and unblocked us,
			// but before we had a chance to run, timeout has been reset.
			// Pretend it has not happened and retry.
		}
	}
}
|
|
|
|
|
|
|
|
// runtime_pollWaitCanceled waits until pending IO completes, ignoring
// closing and deadlines (waitio=true in netpollblock).
func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
	// This function is used only on windows after a failed attempt to cancel
	// a pending async IO operation. Wait for ioready, ignore closing or timeouts.
	while(!netpollblock(pd, mode, true))
		;
}
|
|
|
|
|
|
|
|
// runtime_pollSetDeadline sets the read ('r'), write ('w') or combined
// ('r'+'w') deadline d (runtime-clock nanoseconds; 0 = no deadline) on pd,
// replacing any previously installed deadline timers. If the new deadline
// is already in the past, currently blocked goroutines are woken up.
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
	G *rg, *wg;

	runtime·lock(pd);
	if(pd->closing) {
		// Descriptor is being closed; its timers were already cleaned up
		// by runtime_pollUnblock, nothing to do.
		runtime·unlock(pd);
		return;
	}
	pd->seq++;  // invalidate current timers
	// Reset current timers.
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	// Setup new timers.
	// d < 0 encodes "deadline already expired" for checkerr/netpollunblock.
	if(d != 0 && d <= runtime·nanotime())
		d = -1;
	if(mode == 'r' || mode == 'r'+'w')
		pd->rd = d;
	if(mode == 'w' || mode == 'r'+'w')
		pd->wd = d;
	if(pd->rd > 0 && pd->rd == pd->wd) {
		// Identical read and write deadlines: one combined timer suffices.
		pd->rt.fv = &deadlineFn;
		pd->rt.when = pd->rd;
		// Copy current seq into the timer arg.
		// Timer func will check the seq against current descriptor seq,
		// if they differ the descriptor was reused or timers were reset.
		pd->rt.arg.type = (Type*)pd->seq;
		pd->rt.arg.data = pd;
		runtime·addtimer(&pd->rt);
	} else {
		if(pd->rd > 0) {
			pd->rt.fv = &readDeadlineFn;
			pd->rt.when = pd->rd;
			pd->rt.arg.type = (Type*)pd->seq;
			pd->rt.arg.data = pd;
			runtime·addtimer(&pd->rt);
		}
		if(pd->wd > 0) {
			pd->wt.fv = &writeDeadlineFn;
			pd->wt.when = pd->wd;
			pd->wt.arg.type = (Type*)pd->seq;
			pd->wt.arg.data = pd;
			runtime·addtimer(&pd->wt);
		}
	}
	// If we set the new deadline in the past, unblock currently pending IO if any.
	rg = nil;
	runtime·atomicstorep(&wg, nil);  // full memory barrier between stores to rd/wd and load of rg/wg in netpollunblock
	if(pd->rd < 0)
		rg = netpollunblock(pd, 'r', false);
	if(pd->wd < 0)
		wg = netpollunblock(pd, 'w', false);
	runtime·unlock(pd);
	// Wake the goroutines outside the lock.
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}
|
|
|
|
|
|
|
|
// runtime_pollUnblock marks the descriptor as closing, wakes any parked
// reader/writer (they will observe errClosing via checkerr) and deletes the
// deadline timers. Must be called exactly once before runtime_pollClose.
func runtime_pollUnblock(pd *PollDesc) {
	G *rg, *wg;

	runtime·lock(pd);
	if(pd->closing)
		runtime·throw("runtime_pollUnblock: already closing");
	pd->closing = true;
	pd->seq++;	// invalidate outstanding timers / ready notifications
	runtime·atomicstorep(&rg, nil);  // full memory barrier between store to closing and read of rg/wg in netpollunblock
	rg = netpollunblock(pd, 'r', false);
	wg = netpollunblock(pd, 'w', false);
	if(pd->rt.fv) {
		runtime·deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime·deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	runtime·unlock(pd);
	// Wake the evicted goroutines outside the lock.
	if(rg)
		runtime·ready(rg);
	if(wg)
		runtime·ready(wg);
}
|
|
|
|
|
2013-08-08 07:41:57 -06:00
|
|
|
// runtime·netpollfd returns the file descriptor associated with pd.
uintptr
runtime·netpollfd(PollDesc *pd)
{
	return pd->fd;
}
|
|
|
|
|
|
|
|
// runtime·netpolluser returns a pointer to the user-settable cookie slot of
// pd, for use by platform-specific poller implementations.
void**
runtime·netpolluser(PollDesc *pd)
{
	return &pd->user;
}
|
|
|
|
|
|
|
|
// runtime·netpollclosing reports whether pd is being closed.
// Callers that need a stable answer must hold the pd lock
// (see runtime·netpolllock below).
bool
runtime·netpollclosing(PollDesc *pd)
{
	return pd->closing;
}
|
|
|
|
|
|
|
|
// runtime·netpolllock acquires the per-descriptor lock, for use by
// platform-specific poller code (e.g. the Solaris re-arming path).
void
runtime·netpolllock(PollDesc *pd)
{
	runtime·lock(pd);
}
|
|
|
|
|
|
|
|
// runtime·netpollunlock releases the per-descriptor lock acquired with
// runtime·netpolllock.
void
runtime·netpollunlock(PollDesc *pd)
{
	runtime·unlock(pd);
}
|
|
|
|
|
2013-03-14 00:38:37 -06:00
|
|
|
// make pd ready, newly runnable goroutines (if any) are enqueued info gpp list
|
|
|
|
void
|
|
|
|
runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
|
|
|
|
{
|
|
|
|
G *rg, *wg;
|
|
|
|
|
|
|
|
rg = wg = nil;
|
|
|
|
if(mode == 'r' || mode == 'r'+'w')
|
2013-06-24 20:29:00 -06:00
|
|
|
rg = netpollunblock(pd, 'r', true);
|
2013-03-14 00:38:37 -06:00
|
|
|
if(mode == 'w' || mode == 'r'+'w')
|
2013-06-24 20:29:00 -06:00
|
|
|
wg = netpollunblock(pd, 'w', true);
|
2013-03-14 00:38:37 -06:00
|
|
|
if(rg) {
|
|
|
|
rg->schedlink = *gpp;
|
|
|
|
*gpp = rg;
|
|
|
|
}
|
|
|
|
if(wg) {
|
|
|
|
wg->schedlink = *gpp;
|
|
|
|
*gpp = wg;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
static intgo
|
|
|
|
checkerr(PollDesc *pd, int32 mode)
|
|
|
|
{
|
|
|
|
if(pd->closing)
|
|
|
|
return 1; // errClosing
|
|
|
|
if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
|
|
|
|
return 2; // errTimeout
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2014-01-22 00:27:16 -07:00
|
|
|
// blockcommit is the park commit callback: it commits gp to park by
// swapping the semaphore from WAIT to the goroutine pointer. Fails (and
// aborts the park) if a concurrent notification already changed *gpp.
static bool
blockcommit(G *gp, G **gpp)
{
	return runtime·casp(gpp, WAIT, gp);
}
|
|
|
|
|
2013-06-24 20:29:00 -06:00
|
|
|
// returns true if IO is ready, or false if timedout or closed
// waitio - wait only for completed IO, ignore errors
static bool
netpollblock(PollDesc *pd, int32 mode, bool waitio)
{
	G **gpp, *old;

	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;

	// set the gpp semaphore to WAIT
	for(;;) {
		old = *gpp;
		if(old == READY) {
			// Pending notification: consume it and return immediately.
			*gpp = nil;
			return true;
		}
		if(old != nil)
			runtime·throw("netpollblock: double wait");
		if(runtime·casp(gpp, nil, WAIT))
			break;
	}

	// need to recheck error states after setting gpp to WAIT
	// this is necessary because runtime_pollUnblock/runtime_pollSetDeadline/deadlineimpl
	// do the opposite: store to closing/rd/wd, membarrier, load of rg/wg
	if(waitio || checkerr(pd, mode) == 0)
		runtime·park((bool(*)(G*, void*))blockcommit, gpp, "IO wait");
	// be careful to not lose concurrent READY notification
	old = runtime·xchgp(gpp, nil);
	if(old > WAIT)
		runtime·throw("netpollblock: corrupted state");
	return old == READY;
}
|
|
|
|
|
|
|
|
// netpollunblock moves the semaphore for mode ('r'/'w') out of the waiting
// state. If a goroutine is parked on it, that goroutine is returned (the
// caller is responsible for readying it); otherwise returns nil. With
// ioready the semaphore is left in READY so a future waiter sees the
// notification; without it the semaphore is cleared to nil.
static G*
netpollunblock(PollDesc *pd, int32 mode, bool ioready)
{
	G **gpp, *old, *new;

	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;

	for(;;) {
		old = *gpp;
		if(old == READY)
			// Notification already pending; nothing to wake.
			return nil;
		if(old == nil && !ioready) {
			// Only set READY for ioready. runtime_pollWait
			// will check for timeout/cancel before waiting.
			return nil;
		}
		new = nil;
		if(ioready)
			new = READY;
		if(runtime·casp(gpp, old, new))
			break;
	}
	if(old > WAIT)
		return old;  // must be G*
	return nil;
}
|
|
|
|
|
|
|
|
static void
|
|
|
|
deadlineimpl(int64 now, Eface arg, bool read, bool write)
|
|
|
|
{
|
|
|
|
PollDesc *pd;
|
|
|
|
uint32 seq;
|
|
|
|
G *rg, *wg;
|
|
|
|
|
|
|
|
USED(now);
|
|
|
|
pd = (PollDesc*)arg.data;
|
|
|
|
// This is the seq when the timer was set.
|
|
|
|
// If it's stale, ignore the timer event.
|
|
|
|
seq = (uintptr)arg.type;
|
|
|
|
rg = wg = nil;
|
|
|
|
runtime·lock(pd);
|
|
|
|
if(seq != pd->seq) {
|
|
|
|
// The descriptor was reused or timers were reset.
|
|
|
|
runtime·unlock(pd);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
if(read) {
|
|
|
|
if(pd->rd <= 0 || pd->rt.fv == nil)
|
|
|
|
runtime·throw("deadlineimpl: inconsistent read deadline");
|
|
|
|
pd->rd = -1;
|
2014-01-22 00:27:16 -07:00
|
|
|
runtime·atomicstorep(&pd->rt.fv, nil); // full memory barrier between store to rd and load of rg in netpollunblock
|
2013-06-24 20:29:00 -06:00
|
|
|
rg = netpollunblock(pd, 'r', false);
|
2013-03-14 00:38:37 -06:00
|
|
|
}
|
|
|
|
if(write) {
|
|
|
|
if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
|
|
|
|
runtime·throw("deadlineimpl: inconsistent write deadline");
|
|
|
|
pd->wd = -1;
|
2014-01-22 00:27:16 -07:00
|
|
|
runtime·atomicstorep(&pd->wt.fv, nil); // full memory barrier between store to wd and load of wg in netpollunblock
|
2013-06-24 20:29:00 -06:00
|
|
|
wg = netpollunblock(pd, 'w', false);
|
2013-03-14 00:38:37 -06:00
|
|
|
}
|
|
|
|
runtime·unlock(pd);
|
|
|
|
if(rg)
|
|
|
|
runtime·ready(rg);
|
|
|
|
if(wg)
|
|
|
|
runtime·ready(wg);
|
|
|
|
}
|
|
|
|
|
|
|
|
// deadline is the timer callback for a combined read+write deadline.
static void
deadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, true);
}
|
|
|
|
|
|
|
|
// readDeadline is the timer callback for a read-only deadline.
static void
readDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, false);
}
|
|
|
|
|
|
|
|
// writeDeadline is the timer callback for a write-only deadline.
static void
writeDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, false, true);
}
|
|
|
|
|
|
|
|
static PollDesc*
|
|
|
|
allocPollDesc(void)
|
|
|
|
{
|
|
|
|
PollDesc *pd;
|
|
|
|
uint32 i, n;
|
|
|
|
|
|
|
|
runtime·lock(&pollcache);
|
|
|
|
if(pollcache.first == nil) {
|
2014-01-30 02:28:19 -07:00
|
|
|
n = PollBlockSize/sizeof(*pd);
|
2013-03-14 00:38:37 -06:00
|
|
|
if(n == 0)
|
|
|
|
n = 1;
|
|
|
|
// Must be in non-GC memory because can be referenced
|
|
|
|
// only from epoll/kqueue internals.
|
runtime: account for all sys memory in MemStats
Currently lots of sys allocations are not accounted in any of XxxSys,
including GC bitmap, spans table, GC roots blocks, GC finalizer blocks,
iface table, netpoll descriptors and more. Up to ~20% can unaccounted.
This change introduces 2 new stats: GCSys and OtherSys for GC metadata
and all other misc allocations, respectively.
Also ensures that all XxxSys indeed sum up to Sys. All sys memory allocation
functions require the stat for accounting, so that it's impossible to miss something.
Also fix updating of mcache_sys/inuse, they were not updated after deallocation.
test/bench/garbage/parser before:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14204928
MCacheSys 16384
BuckHashSys 1439992
after:
Sys 670064344
HeapSys 610271232
StackSys 65536
MSpanSys 14188544
MCacheSys 16384
BuckHashSys 3194304
GCSys 39198688
OtherSys 3129656
Fixes #5799.
R=rsc, dave, alex.brainman
CC=golang-dev
https://golang.org/cl/12946043
2013-09-06 14:55:40 -06:00
|
|
|
pd = runtime·persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
|
2013-03-14 00:38:37 -06:00
|
|
|
for(i = 0; i < n; i++) {
|
|
|
|
pd[i].link = pollcache.first;
|
|
|
|
pollcache.first = &pd[i];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
pd = pollcache.first;
|
|
|
|
pollcache.first = pd->link;
|
|
|
|
runtime·unlock(&pollcache);
|
|
|
|
return pd;
|
|
|
|
}
|