go/src/pkg/runtime/netpoll.goc
Dmitriy Vyukov a33ef8d11b runtime: account for all sys memory in MemStats
Currently lots of sys allocations are not accounted in any of XxxSys,
including GC bitmap, spans table, GC roots blocks, GC finalizer blocks,
iface table, netpoll descriptors and more. Up to ~20% can be unaccounted for.
This change introduces 2 new stats: GCSys and OtherSys for GC metadata
and all other misc allocations, respectively.
Also ensures that all XxxSys stats indeed sum up to Sys. All sys memory allocation
functions now require a stat for accounting, so that it's impossible to miss anything.
Also fixes updating of mcache_sys/inuse; they were not updated after deallocation.

test/bench/garbage/parser before:
Sys		670064344
HeapSys		610271232
StackSys	65536
MSpanSys	14204928
MCacheSys	16384
BuckHashSys	1439992

after:
Sys		670064344
HeapSys		610271232
StackSys	65536
MSpanSys	14188544
MCacheSys	16384
BuckHashSys	3194304
GCSys		39198688
OtherSys	3129656

Fixes #5799.

R=rsc, dave, alex.brainman
CC=golang-dev
https://golang.org/cl/12946043
2013-09-06 16:55:40 -04:00
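
As a quick sanity check of the new accounting (a minimal sketch, not part of this change), a user program can read runtime.MemStats and verify that the per-category XxxSys fields, including the new GCSys and OtherSys, sum to Sys:

	package main

	import (
		"fmt"
		"runtime"
	)

	func main() {
		var m runtime.MemStats
		runtime.ReadMemStats(&m)
		// With this change, the XxxSys categories account for all of Sys.
		sum := m.HeapSys + m.StackSys + m.MSpanSys + m.MCacheSys +
			m.BuckHashSys + m.GCSys + m.OtherSys
		fmt.Println("Sys:", m.Sys, "sum of XxxSys:", sum)
	}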


// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin dragonfly freebsd linux netbsd openbsd windows

package net

#include "runtime.h"
#include "defs_GOOS_GOARCH.h"
#include "arch_GOARCH.h"
#include "malloc.h"
// Integrated network poller (platform-independent part).
// A particular implementation (epoll/kqueue) must define the following functions:
// void runtime·netpollinit(void); // to initialize the poller
// int32 runtime·netpollopen(uintptr fd, PollDesc *pd); // to arm edge-triggered notifications
// and associate fd with pd.
// An implementation must call the following function to denote that the pd is ready.
// void runtime·netpollready(G **gpp, PollDesc *pd, int32 mode);
#define READY ((G*)1)
struct PollDesc
{
PollDesc* link; // in pollcache, protected by pollcache.Lock
Lock; // protects the following fields
uintptr fd;
bool closing;
uintptr seq; // protects from stale timers and ready notifications
G* rg; // G waiting for read or READY (binary semaphore)
Timer rt; // read deadline timer (set if rt.fv != nil)
int64 rd; // read deadline
G* wg; // the same for writes
Timer wt;
int64 wd;
};
static struct
{
Lock;
PollDesc* first;
// PollDesc objects must be type-stable,
// because we can get a ready notification from epoll/kqueue
// after the descriptor is closed/reused.
// Stale notifications are detected using the seq field,
// which is incremented when deadlines are changed or the descriptor is reused.
} pollcache;
static bool netpollblock(PollDesc*, int32);
static G* netpollunblock(PollDesc*, int32, bool);
static void deadline(int64, Eface);
static void readDeadline(int64, Eface);
static void writeDeadline(int64, Eface);
static PollDesc* allocPollDesc(void);
static intgo checkerr(PollDesc *pd, int32 mode);
static FuncVal deadlineFn = {(void(*)(void))deadline};
static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
func runtime_pollServerInit() {
runtime·netpollinit();
}
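// runtime_pollOpen takes a PollDesc from the cache, resets its state and bumps seq,
// then registers fd with the platform poller.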
func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
pd = allocPollDesc();
runtime·lock(pd);
if(pd->wg != nil && pd->wg != READY)
runtime·throw("runtime_pollOpen: blocked write on free descriptor");
if(pd->rg != nil && pd->rg != READY)
runtime·throw("runtime_pollOpen: blocked read on free descriptor");
pd->fd = fd;
pd->closing = false;
pd->seq++;
pd->rg = nil;
pd->rd = 0;
pd->wg = nil;
pd->wd = 0;
runtime·unlock(pd);
errno = runtime·netpollopen(fd, pd);
}
func runtime_pollClose(pd *PollDesc) {
if(!pd->closing)
runtime·throw("runtime_pollClose: close w/o unblock");
if(pd->wg != nil && pd->wg != READY)
runtime·throw("runtime_pollClose: blocked write on closing descriptor");
if(pd->rg != nil && pd->rg != READY)
runtime·throw("runtime_pollClose: blocked read on closing descriptor");
runtime·netpollclose(pd->fd);
runtime·lock(&pollcache);
pd->link = pollcache.first;
pollcache.first = pd;
runtime·unlock(&pollcache);
}
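// runtime_pollReset prepares the descriptor for a new wait in mode ('r' or 'w'),
// clearing any previous ready notification, unless it is closing or timed out.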
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
runtime·lock(pd);
err = checkerr(pd, mode);
if(err)
goto ret;
if(mode == 'r')
pd->rg = nil;
else if(mode == 'w')
pd->wg = nil;
ret:
runtime·unlock(pd);
}
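// runtime_pollWait parks the goroutine until IO is ready for mode,
// or until the descriptor is closed or its deadline expires.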
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
runtime·lock(pd);
err = checkerr(pd, mode);
if(err == 0) {
while(!netpollblock(pd, mode)) {
err = checkerr(pd, mode);
if(err != 0)
break;
// Can happen if the timeout fired and unblocked us,
// but the timeout was reset before we had a chance to run.
// Pretend it has not happened and retry.
}
}
runtime·unlock(pd);
}
func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
runtime·lock(pd);
// wait for ioready, ignore closing or timeouts.
while(!netpollblock(pd, mode))
;
runtime·unlock(pd);
}
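// runtime_pollSetDeadline installs new read/write deadline timers for pd;
// a deadline already in the past immediately unblocks pending waiters.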
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
G *rg, *wg;
runtime·lock(pd);
if(pd->closing) {
runtime·unlock(pd);
return;
}
pd->seq++; // invalidate current timers
// Reset current timers.
if(pd->rt.fv) {
runtime·deltimer(&pd->rt);
pd->rt.fv = nil;
}
if(pd->wt.fv) {
runtime·deltimer(&pd->wt);
pd->wt.fv = nil;
}
// Setup new timers.
if(d != 0 && d <= runtime·nanotime())
d = -1;
if(mode == 'r' || mode == 'r'+'w')
pd->rd = d;
if(mode == 'w' || mode == 'r'+'w')
pd->wd = d;
if(pd->rd > 0 && pd->rd == pd->wd) {
pd->rt.fv = &deadlineFn;
pd->rt.when = pd->rd;
// Copy current seq into the timer arg.
// Timer func will check the seq against current descriptor seq,
// if they differ the descriptor was reused or timers were reset.
pd->rt.arg.type = (Type*)pd->seq;
pd->rt.arg.data = pd;
runtime·addtimer(&pd->rt);
} else {
if(pd->rd > 0) {
pd->rt.fv = &readDeadlineFn;
pd->rt.when = pd->rd;
pd->rt.arg.type = (Type*)pd->seq;
pd->rt.arg.data = pd;
runtime·addtimer(&pd->rt);
}
if(pd->wd > 0) {
pd->wt.fv = &writeDeadlineFn;
pd->wt.when = pd->wd;
pd->wt.arg.type = (Type*)pd->seq;
pd->wt.arg.data = pd;
runtime·addtimer(&pd->wt);
}
}
// If the new deadline is in the past, unblock any currently pending IO.
rg = nil;
wg = nil;
if(pd->rd < 0)
rg = netpollunblock(pd, 'r', false);
if(pd->wd < 0)
wg = netpollunblock(pd, 'w', false);
runtime·unlock(pd);
if(rg)
runtime·ready(rg);
if(wg)
runtime·ready(wg);
}
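// runtime_pollUnblock marks the descriptor as closing, stops its deadline timers
// and wakes any goroutines blocked on it.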
func runtime_pollUnblock(pd *PollDesc) {
G *rg, *wg;
runtime·lock(pd);
if(pd->closing)
runtime·throw("runtime_pollUnblock: already closing");
pd->closing = true;
pd->seq++;
rg = netpollunblock(pd, 'r', false);
wg = netpollunblock(pd, 'w', false);
if(pd->rt.fv) {
runtime·deltimer(&pd->rt);
pd->rt.fv = nil;
}
if(pd->wt.fv) {
runtime·deltimer(&pd->wt);
pd->wt.fv = nil;
}
runtime·unlock(pd);
if(rg)
runtime·ready(rg);
if(wg)
runtime·ready(wg);
}
uintptr
runtime·netpollfd(PollDesc *pd)
{
return pd->fd;
}
// make pd ready; newly runnable goroutines (if any) are enqueued into the gpp list
void
runtime·netpollready(G **gpp, PollDesc *pd, int32 mode)
{
G *rg, *wg;
rg = wg = nil;
runtime·lock(pd);
if(mode == 'r' || mode == 'r'+'w')
rg = netpollunblock(pd, 'r', true);
if(mode == 'w' || mode == 'r'+'w')
wg = netpollunblock(pd, 'w', true);
runtime·unlock(pd);
if(rg) {
rg->schedlink = *gpp;
*gpp = rg;
}
if(wg) {
wg->schedlink = *gpp;
*gpp = wg;
}
}
static intgo
checkerr(PollDesc *pd, int32 mode)
{
if(pd->closing)
return 1; // errClosing
if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
return 2; // errTimeout
return 0;
}
// returns true if IO is ready, or false if timed out or closed
static bool
netpollblock(PollDesc *pd, int32 mode)
{
G **gpp;
gpp = &pd->rg;
if(mode == 'w')
gpp = &pd->wg;
if(*gpp == READY) {
*gpp = nil;
return true;
}
if(*gpp != nil)
runtime·throw("netpollblock: double wait");
*gpp = g;
runtime·park(runtime·unlock, &pd->Lock, "IO wait");
runtime·lock(pd);
if(g->param)
return true;
return false;
}
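// netpollunblock returns the G parked on pd for mode, if any; when ioready is set
// and nobody is parked, it leaves a READY note for the next waiter.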
static G*
netpollunblock(PollDesc *pd, int32 mode, bool ioready)
{
G **gpp, *old;
gpp = &pd->rg;
if(mode == 'w')
gpp = &pd->wg;
if(*gpp == READY)
return nil;
if(*gpp == nil) {
// Only set READY for ioready. runtime_pollWait
// will check for timeout/cancel before waiting.
if(ioready)
*gpp = READY;
return nil;
}
old = *gpp;
// pass unblock reason onto blocked g
old->param = (void*)ioready;
*gpp = nil;
return old;
}
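// deadlineimpl is the timer callback: it discards stale timers (seq mismatch),
// marks the expired deadline(s) as -1 and wakes the blocked goroutines.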
static void
deadlineimpl(int64 now, Eface arg, bool read, bool write)
{
PollDesc *pd;
uint32 seq;
G *rg, *wg;
USED(now);
pd = (PollDesc*)arg.data;
// This is the seq when the timer was set.
// If it's stale, ignore the timer event.
seq = (uintptr)arg.type;
rg = wg = nil;
runtime·lock(pd);
if(seq != pd->seq) {
// The descriptor was reused or timers were reset.
runtime·unlock(pd);
return;
}
if(read) {
if(pd->rd <= 0 || pd->rt.fv == nil)
runtime·throw("deadlineimpl: inconsistent read deadline");
pd->rd = -1;
pd->rt.fv = nil;
rg = netpollunblock(pd, 'r', false);
}
if(write) {
if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
runtime·throw("deadlineimpl: inconsistent write deadline");
pd->wd = -1;
pd->wt.fv = nil;
wg = netpollunblock(pd, 'w', false);
}
runtime·unlock(pd);
if(rg)
runtime·ready(rg);
if(wg)
runtime·ready(wg);
}
static void
deadline(int64 now, Eface arg)
{
deadlineimpl(now, arg, true, true);
}
static void
readDeadline(int64 now, Eface arg)
{
deadlineimpl(now, arg, true, false);
}
static void
writeDeadline(int64 now, Eface arg)
{
deadlineimpl(now, arg, false, true);
}
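// allocPollDesc pops a PollDesc from pollcache, refilling the cache with a
// page-sized batch from persistent (non-GC) memory when it is empty.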
static PollDesc*
allocPollDesc(void)
{
PollDesc *pd;
uint32 i, n;
runtime·lock(&pollcache);
if(pollcache.first == nil) {
n = PageSize/sizeof(*pd);
if(n == 0)
n = 1;
// Must be in non-GC memory because it can be referenced
// only from epoll/kqueue internals.
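// The &mstats.other_sys argument charges this allocation to OtherSys,
// as introduced by this change.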
pd = runtime·persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
for(i = 0; i < n; i++) {
pd[i].link = pollcache.first;
pollcache.first = &pd[i];
}
}
pd = pollcache.first;
pollcache.first = pd->link;
runtime·unlock(&pollcache);
return pd;
}