// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "runtime.h"
#include "arch_GOARCH.h"
#include "zaexperiment.h"
#include "malloc.h"
#include "stack.h"
#include "race.h"
#include "type.h"
#include "mgc0.h"
#include "../../cmd/ld/textflag.h"
// Goroutine scheduler
// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
//
// The main concepts are:
// G - goroutine.
// M - worker thread, or machine.
// P - processor, a resource that is required to execute Go code.
// An M must have an associated P to execute Go code; however, it can be
// blocked or in a syscall without an associated P.
//
// Design doc at http://golang.org/s/go11sched.
typedef struct Sched Sched;
struct Sched {
Mutex lock;
uint64 goidgen;
M* midle; // idle m's waiting for work
int32 nmidle; // number of idle m's waiting for work
int32 nmidlelocked; // number of locked m's waiting for work
int32 mcount; // number of m's that have been created
int32 maxmcount; // maximum number of m's allowed (or die)
P* pidle; // idle P's
uint32 npidle;
uint32 nmspinning;
// Global runnable queue.
G* runqhead;
G* runqtail;
int32 runqsize;
// Global cache of dead G's.
Mutex gflock;
G* gfree;
int32 ngfree;
uint32 gcwaiting; // gc is waiting to run
int32 stopwait;
Note stopnote;
uint32 sysmonwait;
Note sysmonnote;
uint64 lastpoll;
int32 profilehz; // cpu profiling rate
};
enum
{
// Number of goroutine ids to grab from runtime·sched.goidgen to the local per-P cache at once.
// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
GoidCacheBatch = 16,
};
Sched runtime·sched;
int32 runtime·gomaxprocs;
uint32 runtime·needextram;
bool runtime·iscgo;
M runtime·m0;
G runtime·g0; // idle goroutine for m0
G* runtime·lastg;
M* runtime·allm;
M* runtime·extram;
P* runtime·allp[MaxGomaxprocs+1];
int8* runtime·goos;
int32 runtime·ncpu;
static int32 newprocs;
static Mutex allglock; // the following vars are protected by this lock or by stoptheworld
G** runtime·allg;
Slice runtime·allgs;
uintptr runtime·allglen;
static uintptr allgcap;
ForceGCState runtime·forcegc;
void runtime·mstart(void);
static void runqput(P*, G*);
static G* runqget(P*);
static bool runqputslow(P*, G*, uint32, uint32);
static G* runqsteal(P*, P*);
static void mput(M*);
static M* mget(void);
static void mcommoninit(M*);
static void schedule(void);
static void procresize(int32);
static void acquirep(P*);
static P* releasep(void);
static void newm(void(*)(void), P*);
static void stopm(void);
static void startm(P*, bool);
static void handoffp(P*);
static void wakep(void);
static void stoplockedm(void);
static void startlockedm(G*);
static void sysmon(void);
static uint32 retake(int64);
static void incidlelocked(int32);
static void checkdead(void);
static void exitsyscall0(G*);
void runtime·park_m(G*);
static void goexit0(G*);
static void gfput(P*, G*);
static G* gfget(P*);
static void gfpurge(P*);
static void globrunqput(G*);
static void globrunqputbatch(G*, G*, int32);
static G* globrunqget(P*, int32);
static P* pidleget(void);
static void pidleput(P*);
static void injectglist(G*);
static bool preemptall(void);
static bool preemptone(P*);
static bool exitsyscallfast(void);
static bool haveexperiment(int8*);
static void allgadd(G*);
static void dropg(void);
extern String runtime·buildVersion;
// For cgo-using programs with external linking,
// export "main" (defined in assembly) so that libc can handle basic
// C runtime startup and call the Go program as if it were
// the C main function.
#pragma cgo_export_static main
// Filled in by dynamic linker when Cgo is available.
void* _cgo_init;
void* _cgo_malloc;
void* _cgo_free;
// Copy for Go code.
void* runtime·cgoMalloc;
void* runtime·cgoFree;
// The bootstrap sequence is:
//
// call osinit
// call schedinit
// make & queue new G
// call runtime·mstart
//
// The new G calls runtime·main.
void
runtime·schedinit(void)
{
int32 n, procs;
byte *p;
Eface i;
// raceinit must be the first call to race detector.
// In particular, it must be done before mallocinit below calls racemapshadow.
if(raceenabled)
g->racectx = runtime·raceinit();
runtime·sched.maxmcount = 10000;
runtime·precisestack = true; // haveexperiment("precisestack");
runtime·symtabinit();
runtime·stackinit();
runtime·mallocinit();
mcommoninit(g->m);
// Initialize the itable value for newErrorCString,
// so that the next time it gets called, possibly
// in a fault during a garbage collection, it will not
// need to allocate memory.
runtime·newErrorCString(0, &i);
runtime·goargs();
runtime·goenvs();
runtime·parsedebugvars();
runtime·gcinit();
runtime·sched.lastpoll = runtime·nanotime();
procs = 1;
p = runtime·getenv("GOMAXPROCS");
if(p != nil && (n = runtime·atoi(p)) > 0) {
if(n > MaxGomaxprocs)
n = MaxGomaxprocs;
procs = n;
}
procresize(procs);
runtime·copystack = runtime·precisestack;
p = runtime·getenv("GOCOPYSTACK");
if(p != nil && !runtime·strcmp(p, (byte*)"0"))
runtime·copystack = false;
if(runtime·buildVersion.str == nil) {
// Condition should never trigger. This code just serves
// to ensure runtime·buildVersion is kept in the resulting binary.
runtime·buildVersion.str = (uint8*)"unknown";
runtime·buildVersion.len = 7;
}
runtime·cgoMalloc = _cgo_malloc;
runtime·cgoFree = _cgo_free;
}
extern void main·init(void);
extern void runtime·init(void);
extern void main·main(void);
static FuncVal initDone = { runtime·unlockOSThread };
// The main goroutine.
// Note: C frames in general are not copyable during stack growth, for two reasons:
// 1) We don't know where in a frame to find pointers to other stack locations.
// 2) There's no guarantee that globals or heap values do not point into the frame.
//
// The C frame for runtime.main is copyable, because:
// 1) There are no pointers to other stack locations in the frame
// (d.fn points at a global, d.link is nil, d.argp is -1).
// 2) The only pointer into this frame is from the defer chain,
// which is explicitly handled during stack copying.
void
runtime·main(void)
{
Defer d;
// Racectx of m0->g0 is used only as the parent of the main goroutine.
// It must not be used for anything else.
g->m->g0->racectx = 0;
// Max stack size is 1 GB on 64-bit, 250 MB on 32-bit.
// Using decimal instead of binary GB and MB because
// they look nicer in the stack overflow failure message.
if(sizeof(void*) == 8)
runtime·maxstacksize = 1000000000;
else
runtime·maxstacksize = 250000000;
newm(sysmon, nil);
// Lock the main goroutine onto this, the main OS thread,
// during initialization. Most programs won't care, but a few
// do require certain calls to be made by the main thread.
// Those can arrange for main.main to run in the main thread
// by calling runtime.LockOSThread during initialization
// to preserve the lock.
runtime·lockOSThread();
// Defer unlock so that runtime.Goexit during init does the unlock too.
d.fn = &initDone;
d.siz = 0;
d.link = g->defer;
d.argp = NoArgs;
d.special = true;
g->defer = &d;
if(g->m != &runtime·m0)
runtime·throw("runtime·main not on m0");
runtime·init();
mstats.enablegc = 1; // now that runtime is initialized, GC is okay
main·init();
if(g->defer != &d || d.fn != &initDone)
runtime·throw("runtime: bad defer entry after init");
g->defer = d.link;
runtime·unlockOSThread();
main·main();
if(raceenabled)
runtime·racefini();
// Make racy client program work: if panicking on
// another goroutine at the same time as main returns,
// let the other goroutine finish printing the panic trace.
// Once it does, it will exit. See issue 3934.
if(runtime·panicking)
runtime·park(nil, nil, runtime·gostringnocopy((byte*)"panicwait"));
runtime·exit(0);
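// Not reached in normal operation: if exit somehow returns, the write to the text segment faults.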
for(;;)
*(int32*)runtime·main = 0;
}
static void
dumpgstatus(G* gp)
{
runtime·printf("runtime: gp: gp=%p, goid=%D, gp->atomicstatus=%x\n", gp, gp->goid, runtime·readgstatus(gp));
runtime·printf("runtime: g: g=%p, goid=%D, g->atomicstatus=%x\n", g, g->goid, runtime·readgstatus(g));
}
static void
checkmcount(void)
{
// sched lock is held
if(runtime·sched.mcount > runtime·sched.maxmcount){
runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
runtime·throw("thread exhaustion");
}
}
static void
mcommoninit(M *mp)
{
// g0 stack won't make sense for user (and is not necessarily unwindable).
if(g != g->m->g0)
runtime·callers(1, mp->createstack, nelem(mp->createstack));
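// Seed the per-M fastrand state; cputicks adds per-thread variation.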
mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
runtime·lock(&runtime·sched.lock);
mp->id = runtime·sched.mcount++;
checkmcount();
runtime·mpreinit(mp);
// Add to runtime·allm so garbage collector doesn't free g->m
// when it is just in a register or thread-local storage.
mp->alllink = runtime·allm;
// runtime·NumCgoCall() iterates over allm without schedlock,
// so we need to publish it safely.
runtime·atomicstorep(&runtime·allm, mp);
runtime·unlock(&runtime·sched.lock);
}
// Mark gp ready to run.
void
runtime·ready(G *gp)
{
uint32 status;
status = runtime·readgstatus(gp);
// Mark runnable.
g->m->locks++; // disable preemption because it can be holding p in a local var
if((status&~Gscan) != Gwaiting){
dumpgstatus(gp);
runtime·throw("bad g->status in ready");
}
// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
runtime·casgstatus(gp, Gwaiting, Grunnable);
runqput(g->m->p, gp);
if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0) // TODO: fast atomic
wakep();
g->m->locks--;
if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
g->stackguard0 = StackPreempt;
}
void
runtime·ready_m(void)
{
G *gp;
gp = g->m->ptrarg[0];
g->m->ptrarg[0] = nil;
runtime·ready(gp);
}
int32
runtime·gcprocs(void)
{
int32 n;
// Figure out how many CPUs to use during GC.
// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
runtime·lock(&runtime·sched.lock);
n = runtime·gomaxprocs;
if(n > runtime·ncpu)
n = runtime·ncpu;
if(n > MaxGcproc)
n = MaxGcproc;
if(n > runtime·sched.nmidle+1) // one M is currently running
n = runtime·sched.nmidle+1;
runtime·unlock(&runtime·sched.lock);
return n;
}
static bool
needaddgcproc(void)
{
int32 n;
runtime·lock(&runtime·sched.lock);
n = runtime·gomaxprocs;
if(n > runtime·ncpu)
n = runtime·ncpu;
if(n > MaxGcproc)
n = MaxGcproc;
n -= runtime·sched.nmidle+1; // one M is currently running
runtime·unlock(&runtime·sched.lock);
return n > 0;
}
void
runtime·helpgc(int32 nproc)
{
M *mp;
int32 n, pos;
runtime·lock(&runtime·sched.lock);
pos = 0;
for(n = 1; n < nproc; n++) { // one M is currently running
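// Don't hand out the mcache that the current M is using.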
if(runtime·allp[pos]->mcache == g->m->mcache)
pos++;
mp = mget();
if(mp == nil)
runtime·throw("runtime·gcprocs inconsistency");
mp->helpgc = n;
mp->mcache = runtime·allp[pos]->mcache;
pos++;
runtime·notewakeup(&mp->park);
}
runtime·unlock(&runtime·sched.lock);
}
// Similar to stoptheworld but best-effort and can be called several times.
// There is no reverse operation, used during crashing.
// This function must not lock any mutexes.
void
runtime·freezetheworld(void)
{
int32 i;
if(runtime·gomaxprocs == 1)
return;
// stopwait and preemption requests can be lost
// due to races with concurrently executing threads,
// so try several times
for(i = 0; i < 5; i++) {
// this should tell the scheduler to not start any new goroutines
runtime·sched.stopwait = 0x7fffffff;
runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
// this should stop running goroutines
if(!preemptall())
break; // no running goroutines
runtime·usleep(1000);
}
// to be sure
runtime·usleep(1000);
preemptall();
runtime·usleep(1000);
}
static bool
isscanstatus(uint32 status)
{
if(status == Gscan)
runtime·throw("isscanstatus: Bad status Gscan");
return (status&Gscan) == Gscan;
}
// All reads and writes of g's status go through readgstatus, casgstatus
// castogscanstatus, casfromgscanstatus.
#pragma textflag NOSPLIT
uint32
runtime·readgstatus(G *gp)
{
return runtime·atomicload(&gp->atomicstatus);
}
// The Gscanstatuses are acting like locks and this releases them.
// If it proves to be a performance hit we should be able to make these
// simple atomic stores but for now we are going to throw if
// we see an inconsistent state.
void
runtime·casfromgscanstatus(G *gp, uint32 oldval, uint32 newval)
{
bool success = false;
// Check that transition is valid.
switch(oldval) {
case Gscanrunnable:
case Gscanwaiting:
case Gscanrunning:
case Gscansyscall:
if(newval == (oldval&~Gscan))
success = runtime·cas(&gp->atomicstatus, oldval, newval);
break;
case Gscanenqueue:
if(newval == Gwaiting)
success = runtime·cas(&gp->atomicstatus, oldval, newval);
break;
}
if(!success){
runtime·printf("runtime: casfromgscanstatus failed gp=%p, oldval=%d, newval=%d\n",
gp, oldval, newval);
dumpgstatus(gp);
runtime·throw("casfromgscanstatus: gp->status is not in scan state");
}
}
// This will return false if the gp is not in the expected status and the cas fails.
// This acts like a lock acquire while casfromgscanstatus acts like a lock release.
bool
runtime·castogscanstatus(G *gp, uint32 oldval, uint32 newval)
{
switch(oldval) {
case Grunnable:
case Gwaiting:
case Gsyscall:
if(newval == (oldval|Gscan))
return runtime·cas(&gp->atomicstatus, oldval, newval);
break;
case Grunning:
if(newval == Gscanrunning || newval == Gscanenqueue)
return runtime·cas(&gp->atomicstatus, oldval, newval);
break;
}
runtime·printf("runtime: castogscanstatus oldval=%d newval=%d\n", oldval, newval);
runtime·throw("castogscanstatus");
return false; // not reached
}
static void badcasgstatus(void);
static void helpcasgstatus(void);
// If asked to move to or from a Gscanstatus this will throw. Use the castogscanstatus
// and casfromgscanstatus instead.
// casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that
// put it in the Gscan state is finished.
#pragma textflag NOSPLIT
void
runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
{
void (*fn)(void);
if((oldval&Gscan) || (newval&Gscan) || oldval == newval) {
g->m->scalararg[0] = oldval;
g->m->scalararg[1] = newval;
fn = badcasgstatus;
runtime·onM(&fn);
}
// loop if gp->atomicstatus is in a scan state giving
// GC time to finish and change the state to oldval.
while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
// Help GC if needed.
if(gp->preemptscan && !gp->gcworkdone && (oldval == Grunning || oldval == Gsyscall)) {
gp->preemptscan = false;
g->m->ptrarg[0] = gp;
fn = helpcasgstatus;
runtime·onM(&fn);
}
}
}
static void
badcasgstatus(void)
{
uint32 oldval, newval;
oldval = g->m->scalararg[0];
newval = g->m->scalararg[1];
g->m->scalararg[0] = 0;
g->m->scalararg[1] = 0;
runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, newval);
runtime·throw("casgstatus: bad incoming values");
}
static void
helpcasgstatus(void)
{
G *gp;
gp = g->m->ptrarg[0];
g->m->ptrarg[0] = 0;
runtime·gcphasework(gp);
}
// stopg ensures that gp is stopped at a GC safe point where its stack can be scanned
// or in the context of a moving collector the pointers can be flipped from pointing
// to old objects to pointing to new objects.
// If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
// the caller calls restartg.
// If stopg returns false, the caller is not responsible for calling restartg. This can happen
// if another thread, either the gp itself or another GC thread, has taken responsibility
// for doing the GC work related to this thread.
bool
runtime·stopg(G *gp)
{
uint32 s;
for(;;) {
if(gp->gcworkdone)
return false;
s = runtime·readgstatus(gp);
switch(s) {
default:
dumpgstatus(gp);
runtime·throw("stopg: gp->atomicstatus is not valid");
case Gdead:
return false;
case Gcopystack:
// Loop until a new stack is in place.
break;
case Grunnable:
case Gsyscall:
case Gwaiting:
// Claim goroutine by setting scan bit.
if(!runtime·castogscanstatus(gp, s, s|Gscan))
break;
// In scan state, do work.
runtime·gcphasework(gp);
return true;
case Gscanrunnable:
case Gscanwaiting:
case Gscansyscall:
// Goroutine already claimed by another GC helper.
return false;
case Grunning:
// Claim goroutine, so we aren't racing with a status
// transition away from Grunning.
if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning))
break;
// Mark gp for preemption.
if(!gp->gcworkdone) {
gp->preemptscan = true;
gp->preempt = true;
gp->stackguard0 = StackPreempt;
}
// Unclaim.
runtime·casfromgscanstatus(gp, Gscanrunning, Grunning);
return false;
}
}
// Should not be here....
}
// The GC requests that this routine be moved from a scanmumble state to a mumble state.
void
runtime·restartg(G *gp)
{
uint32 s;
s = runtime·readgstatus(gp);
switch(s) {
default:
dumpgstatus(gp);
runtime·throw("restartg: unexpected status");
case Gdead:
break;
case Gscanrunnable:
case Gscanwaiting:
case Gscansyscall:
runtime·casfromgscanstatus(gp, s, s&~Gscan);
break;
case Gscanenqueue:
// Scan is now completed.
// Goroutine now needs to be made runnable.
// We put it on the global run queue; ready blocks on the global scheduler lock.
runtime·casfromgscanstatus(gp, Gscanenqueue, Gwaiting);
if(gp != g->m->curg)
runtime·throw("processing Gscanenqueue on wrong m");
dropg();
runtime·ready(gp);
break;
}
}
static void
stopscanstart(G* gp)
{
if(g == gp)
runtime·throw("GC not moved to G0");
if(runtime·stopg(gp)) {
if(!isscanstatus(runtime·readgstatus(gp))) {
dumpgstatus(gp);
runtime·throw("GC not in scan state");
}
runtime·restartg(gp);
}
}
// Runs on g0 and does the actual work after putting the g back on the run queue.
static void
mquiesce(G *gpmaster)
{
G* gp;
uint32 i;
uint32 status;
uint32 activeglen;
activeglen = runtime·allglen;
// enqueue the calling goroutine.
runtime·restartg(gpmaster);
for(i = 0; i < activeglen; i++) {
gp = runtime·allg[i];
if(runtime·readgstatus(gp) == Gdead)
gp->gcworkdone = true; // noop scan.
else
gp->gcworkdone = false;
stopscanstart(gp);
}
// Check that the G's gcwork (such as scanning) has been done. If not, do it now.
// You can end up doing work here if the page trap on a Grunning goroutine has
// not been sprung, or in some race situations. For example, a runnable goes dead
// and is started up again with gp->gcworkdone set to false.
for(i = 0; i < activeglen; i++) {
gp = runtime·allg[i];
while (!gp->gcworkdone) {
status = runtime·readgstatus(gp);
if(status == Gdead) {
gp->gcworkdone = true; // scan is a noop
break;
//do nothing, scan not needed.
}
if(status == Grunning && gp->stackguard0 == (uintptr)StackPreempt && runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) // nanosecond arg
runtime·noteclear(&runtime·sched.stopnote);
else
stopscanstart(gp);
}
}
for(i = 0; i < activeglen; i++) {
gp = runtime·allg[i];
status = runtime·readgstatus(gp);
if(isscanstatus(status)) {
runtime·printf("mstopandscang:bottom: post scan bad status gp=%p has status %x\n", gp, status);
dumpgstatus(gp);
}
if(!gp->gcworkdone && status != Gdead) {
runtime·printf("mstopandscang:bottom: post scan gp=%p->gcworkdone still false\n", gp);
dumpgstatus(gp);
}
}
schedule(); // Never returns.
}
// quiesce moves all the goroutines to a GC safepoint, which for now is at a preemption point.
// If the global runtime·gcphase is GCmark, quiesce will ensure that all of the goroutines' stacks
// have been scanned before it returns.
void
runtime·quiesce(G* mastergp)
{
void (*fn)(G*);
runtime·castogscanstatus(mastergp, Grunning, Gscanenqueue);
// Now move this to the g0 (aka m) stack.
// g0 will potentially scan this thread and put mastergp on the runqueue
fn = mquiesce;
runtime·mcall(&fn);
}
// This is used by the GC as well as the routines that do stack dumps. In the case
// of GC all the routines can be reliably stopped. This is not always the case
// when the system is in panic or being exited.
void
runtime·stoptheworld(void)
{
int32 i;
uint32 s;
P *p;
bool wait;
// If we hold a lock, then we won't be able to stop another M
// that is blocked trying to acquire the lock.
if(g->m->locks > 0)
runtime·throw("stoptheworld: holding locks");
runtime·lock(&runtime·sched.lock);
runtime·sched.stopwait = runtime·gomaxprocs;
runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
preemptall();
// stop current P
g->m->p->status = Pgcstop; // Pgcstop is only diagnostic.
runtime·sched.stopwait--;
// try to retake all P's in Psyscall status
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
s = p->status;
if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
runtime·sched.stopwait--;
}
// stop idle P's
while(p = pidleget()) {
p->status = Pgcstop;
runtime·sched.stopwait--;
}
wait = runtime·sched.stopwait > 0;
runtime·unlock(&runtime·sched.lock);
// wait for remaining P's to stop voluntarily
if(wait) {
for(;;) {
// wait for 100us, then try to re-preempt in case of any races
if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
runtime·noteclear(&runtime·sched.stopnote);
break;
}
preemptall();
}
}
if(runtime·sched.stopwait)
runtime·throw("stoptheworld: not stopped");
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p->status != Pgcstop)
runtime·throw("stoptheworld: not stopped");
}
}
static void
mhelpgc(void)
{
g->m->helpgc = -1;
}
void
runtime·starttheworld(void)
{
P *p, *p1;
M *mp;
G *gp;
bool add;
g->m->locks++; // disable preemption because it can be holding p in a local var
gp = runtime·netpoll(false); // non-blocking
injectglist(gp);
add = needaddgcproc();
runtime·lock(&runtime·sched.lock);
if(newprocs) {
procresize(newprocs);
newprocs = 0;
} else
procresize(runtime·gomaxprocs);
runtime·sched.gcwaiting = 0;
p1 = nil;
while(p = pidleget()) {
// procresize() puts p's with work at the beginning of the list.
// Once we reach a p without a run queue, the rest don't have one either.
if(p->runqhead == p->runqtail) {
pidleput(p);
break;
}
p->m = mget();
p->link = p1;
p1 = p;
}
if(runtime·sched.sysmonwait) {
runtime·sched.sysmonwait = false;
runtime·notewakeup(&runtime·sched.sysmonnote);
}
runtime·unlock(&runtime·sched.lock);
while(p1) {
p = p1;
p1 = p1->link;
if(p->m) {
mp = p->m;
p->m = nil;
if(mp->nextp)
runtime·throw("starttheworld: inconsistent mp->nextp");
mp->nextp = p;
runtime·notewakeup(&mp->park);
} else {
// Start M to run P. Do not start another M below.
newm(nil, p);
add = false;
}
}
if(add) {
// If GC could have used another helper proc, start one now,
// in the hope that it will be available next time.
// It would have been even better to start it before the collection,
// but doing so requires allocating memory, so it's tricky to
// coordinate. This lazy approach works out in practice:
// we don't mind if the first couple gc rounds don't have quite
// the maximum number of procs.
newm(mhelpgc, nil);
}
g->m->locks--;
if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
g->stackguard0 = StackPreempt;
}
// Called to start an M.
void
runtime·mstart(void)
{
if(g != g->m->g0)
runtime·throw("bad runtime·mstart");
// Record top of stack for use by mcall.
// Once we call schedule we're never coming back,
// so other calls can reuse this stack space.
runtime·gosave(&g->m->g0->sched);
g->m->g0->sched.pc = (uintptr)-1; // make sure it is never used
g->m->g0->stackguard = g->m->g0->stackguard0; // cgo sets only stackguard0, copy it to stackguard
runtime·asminit();
runtime·minit();
// Install signal handlers; after minit so that minit can
// prepare the thread to be able to handle the signals.
if(g->m == &runtime·m0)
runtime·initsig();
if(g->m->mstartfn)
g->m->mstartfn();
if(g->m->helpgc) {
g->m->helpgc = 0;
stopm();
} else if(g->m != &runtime·m0) {
acquirep(g->m->nextp);
g->m->nextp = nil;
}
schedule();
// TODO(brainman): This point is never reached, because scheduler
// does not release os threads at the moment. But once this path
// is enabled, we must remove our seh here.
}
// When running with cgo, we call _cgo_thread_start
// to start threads for us so that we can play nicely with
// foreign code.
void (*_cgo_thread_start)(void*);
typedef struct CgoThreadStart CgoThreadStart;
struct CgoThreadStart
{
G *g;
uintptr *tls;
void (*fn)(void);
};
// Allocate a new m unassociated with any thread.
// Can use p for allocation context if needed.
M*
runtime·allocm(P *p)
{
M *mp;
static Type *mtype; // The Go type M
g->m->locks++; // disable GC because it can be called from sysmon
if(g->m->p == nil)
acquirep(p); // temporarily borrow p for mallocs in this function
if(mtype == nil) {
Eface e;
runtime·gc_m_ptr(&e);
mtype = ((PtrType*)e.type)->elem;
}
mp = runtime·cnew(mtype);
mcommoninit(mp);
// In case of cgo or Solaris, pthread_create will make us a stack.
// Windows will lay out the sched stack on the OS stack.
if(runtime·iscgo || Solaris || Windows)
mp->g0 = runtime·malg(-1);
else
mp->g0 = runtime·malg(8192);
mp->g0->m = mp;
if(p == g->m->p)
releasep();
g->m->locks--;
if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
g->stackguard0 = StackPreempt;
return mp;
}
static G*
allocg(void)
{
G *gp;
static Type *gtype;
if(gtype == nil) {
Eface e;
runtime·gc_g_ptr(&e);
gtype = ((PtrType*)e.type)->elem;
}
gp = runtime·cnew(gtype);
return gp;
}
static M* lockextra(bool nilokay);
static void unlockextra(M*);
// needm is called when a cgo callback happens on a
// thread without an m (a thread not created by Go).
// In this case, needm is expected to find an m to use
// and return with m, g initialized correctly.
// Since m and g are not set now (likely nil, but see below)
// needm is limited in what routines it can call. In particular
// it can only call nosplit functions (textflag 7) and cannot
// do any scheduling that requires an m.
//
// In order to avoid needing heavy lifting here, we adopt
// the following strategy: there is a stack of available m's
// that can be stolen. Using compare-and-swap
// to pop from the stack has ABA races, so we simulate
// a lock by doing an exchange (via casp) to steal the stack
// head and replace the top pointer with MLOCKED (1).
// This serves as a simple spin lock that we can use even
// without an m. The thread that locks the stack in this way
// unlocks the stack by storing a valid stack head pointer.
//
// In order to make sure that there is always an m structure
// available to be stolen, we maintain the invariant that there
// is always one more than needed. At the beginning of the
// program (if cgo is in use) the list is seeded with a single m.
// If needm finds that it has taken the last m off the list, its job
// is - once it has installed its own m so that it can do things like
// allocate memory - to create a spare m and put it on the list.
//
// Each of these extra m's also has a g0 and a curg that are
// pressed into service as the scheduling stack and current
// goroutine for the duration of the cgo callback.
//
// When the callback is done with the m, it calls dropm to
// put the m back on the list.
#pragma textflag NOSPLIT
void
runtime·needm(byte x)
{
M *mp;
if(runtime·needextram) {
// Can happen if C/C++ code calls Go from a global ctor.
// Can not throw, because scheduler is not initialized yet.
runtime·write(2, "fatal error: cgo callback before cgo call\n",
sizeof("fatal error: cgo callback before cgo call\n")-1);
runtime·exit(1);
}
// Lock extra list, take head, unlock popped list.
// nilokay=false is safe here because of the invariant above,
// that the extra list always contains or will soon contain
// at least one m.
mp = lockextra(false);
// Set needextram when we've just emptied the list,
// so that the eventual call into cgocallbackg will
// allocate a new m for the extra list. We delay the
// allocation until then so that it can be done
// after exitsyscall makes sure it is okay to be
// running at all (that is, there's no garbage collection
// running right now).
mp->needextram = mp->schedlink == nil;
unlockextra(mp->schedlink);
// Install g (= m->g0) and set the stack bounds
// to match the current stack. We don't actually know
// how big the stack is, like we don't know how big any
// scheduling stack is, but we assume there's at least 32 kB,
// which is more than enough for us.
runtime·setg(mp->g0);
g->stackbase = (uintptr)(&x + 1024);
g->stackguard = (uintptr)(&x - 32*1024);
g->stackguard0 = g->stackguard;
// Initialize this thread to use the m.
runtime·asminit();
runtime·minit();
}
// newextram allocates an m and puts it on the extra list.
// It is called with a working local m, so that it can do things
// like call schedlock and allocate.
void
runtime·newextram(void)
{
M *mp, *mnext;
G *gp;
// Create extra goroutine locked to extra m.
// The goroutine is the context in which the cgo callback will run.
// The sched.pc will never be returned to, but setting it to
// runtime.goexit makes clear to the traceback routines where
// the goroutine stack ends.
mp = runtime·allocm(nil);
gp = runtime·malg(4096);
gp->sched.pc = (uintptr)runtime·goexit;
gp->sched.sp = gp->stackbase;
gp->sched.lr = 0;
gp->sched.g = gp;
gp->syscallpc = gp->sched.pc;
gp->syscallsp = gp->sched.sp;
gp->syscallstack = gp->stackbase;
gp->syscallguard = gp->stackguard;
// malg returns status as Gidle, change to Gsyscall before adding to allg
// where GC will see it.
runtime·casgstatus(gp, Gidle, Gsyscall);
gp->m = mp;
mp->curg = gp;
mp->locked = LockInternal;
mp->lockedg = gp;
gp->lockedm = mp;
gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
if(raceenabled)
gp->racectx = runtime·racegostart(runtime·newextram);
// put on allg for garbage collector
allgadd(gp);
// Add m to the extra list.
mnext = lockextra(true);
mp->schedlink = mnext;
unlockextra(mp);
}
// dropm is called when a cgo callback has called needm but is now
// done with the callback and returning back into the non-Go thread.
// It puts the current m back onto the extra list.
//
// The main expense here is the call to signalstack to release the
// m's signal stack, and then the call to needm on the next callback
// from this thread. It is tempting to try to save the m for next time,
// which would eliminate both these costs, but there might not be
// a next time: the current thread (which Go does not control) might exit.
// If we saved the m for that thread, there would be an m leak each time
// such a thread exited. Instead, we acquire and release an m on each
// call. These should typically not be scheduling operations, just a few
// atomics, so the cost should be small.
//
// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
// variable using pthread_key_create. Unlike the pthread keys we already use
// on OS X, this dummy key would never be read by Go code. It would exist
// only so that we could register a thread-exit-time destructor.
// That destructor would put the m back onto the extra list.
// This is purely a performance optimization. The current version,
// in which dropm happens on each cgo call, is still correct too.
// We may have to keep the current version on systems with cgo
// but without pthreads, like Windows.
void
runtime·dropm(void)
{
M *mp, *mnext;
// Undo whatever initialization minit did during needm.
runtime·unminit();
// Clear m and g, and return m to the extra list.
// After the call to setmg we can only call nosplit functions.
mp = g->m;
runtime·setg(nil);
mnext = lockextra(true);
mp->schedlink = mnext;
unlockextra(mp);
}
#define MLOCKED ((M*)1)
// lockextra locks the extra list and returns the list head.
// The caller must unlock the list by storing a new list head
// to runtime.extram. If nilokay is true, then lockextra will
// return a nil list head if that's what it finds. If nilokay is false,
// lockextra will keep waiting until the list head is no longer nil.
#pragma textflag NOSPLIT
static M*
lockextra(bool nilokay)
{
M *mp;
void (*yield)(void);
for(;;) {
mp = runtime·atomicloadp(&runtime·extram);
if(mp == MLOCKED) {
yield = runtime·osyield;
yield();
continue;
}
if(mp == nil && !nilokay) {
runtime·usleep(1);
continue;
}
if(!runtime·casp(&runtime·extram, mp, MLOCKED)) {
yield = runtime·osyield;
yield();
continue;
}
break;
}
return mp;
}
#pragma textflag NOSPLIT
static void
unlockextra(M *mp)
{
runtime·atomicstorep(&runtime·extram, mp);
}
// Create a new m. It will start off with a call to fn, or else the scheduler.
static void
newm(void(*fn)(void), P *p)
{
M *mp;
mp = runtime·allocm(p);
mp->nextp = p;
mp->mstartfn = fn;
if(runtime·iscgo) {
CgoThreadStart ts;
if(_cgo_thread_start == nil)
runtime·throw("_cgo_thread_start missing");
ts.g = mp->g0;
ts.tls = mp->tls;
ts.fn = runtime·mstart;
runtime·asmcgocall(_cgo_thread_start, &ts);
return;
}
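// Otherwise create the OS thread directly; it starts in runtime·mstart on mp->g0's stack.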
runtime·newosproc(mp, (byte*)mp->g0->stackbase);
}
// Stops execution of the current m until new work is available.
// Returns with acquired P.
static void
stopm(void)
{
if(g->m->locks)
runtime·throw("stopm holding locks");
if(g->m->p)
runtime·throw("stopm holding p");
if(g->m->spinning) {
g->m->spinning = false;
runtime·xadd(&runtime·sched.nmspinning, -1);
}
retry:
runtime·lock(&runtime·sched.lock);
mput(g->m);
runtime·unlock(&runtime·sched.lock);
runtime·notesleep(&g->m->park);
runtime·noteclear(&g->m->park);
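// If woken to help the GC, do the GC work and then park again.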
if(g->m->helpgc) {
runtime·gchelper();
g->m->helpgc = 0;
g->m->mcache = nil;
goto retry;
}
acquirep(g->m->nextp);
g->m->nextp = nil;
}
static void
mspinning(void)
{
g->m->spinning = true;
}
// Schedules some M to run the p (creates an M if necessary).
// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
static void
startm(P *p, bool spinning)
{
M *mp;
void (*fn)(void);
runtime·lock(&runtime·sched.lock);
if(p == nil) {
p = pidleget();
if(p == nil) {
runtime·unlock(&runtime·sched.lock);
if(spinning)
runtime·xadd(&runtime·sched.nmspinning, -1);
return;
}
}
mp = mget();
runtime·unlock(&runtime·sched.lock);
if(mp == nil) {
fn = nil;
if(spinning)
fn = mspinning;
newm(fn, p);
return;
}
if(mp->spinning)
runtime·throw("startm: m is spinning");
if(mp->nextp)
runtime·throw("startm: m has p");
mp->spinning = spinning;
mp->nextp = p;
runtime·notewakeup(&mp->park);
}
// Hands off P from syscall or locked M.
static void
handoffp(P *p)
{
// if it has local work, start it straight away
if(p->runqhead != p->runqtail || runtime·sched.runqsize) {
startm(p, false);
return;
}
// no local work, check that there are no spinning/idle M's,
// otherwise our help is not required
if(runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) == 0 && // TODO: fast atomic
runtime·cas(&runtime·sched.nmspinning, 0, 1)){
startm(p, true);
return;
}
runtime·lock(&runtime·sched.lock);
if(runtime·sched.gcwaiting) {
p->status = Pgcstop;
if(--runtime·sched.stopwait == 0)
runtime·notewakeup(&runtime·sched.stopnote);
runtime·unlock(&runtime·sched.lock);
return;
}
if(runtime·sched.runqsize) {
runtime·unlock(&runtime·sched.lock);
startm(p, false);
return;
}
// If this is the last running P and nobody is polling the network,
// we need to wake up another M to poll the network.
if(runtime·sched.npidle == runtime·gomaxprocs-1 && runtime·atomicload64(&runtime·sched.lastpoll) != 0) {
runtime·unlock(&runtime·sched.lock);
startm(p, false);
return;
}
pidleput(p);
runtime·unlock(&runtime·sched.lock);
}
// Tries to add one more P to execute G's.
// Called when a G is made runnable (newproc, ready).
static void
wakep(void)
{
// be conservative about spinning threads
if(!runtime·cas(&runtime·sched.nmspinning, 0, 1))
return;
startm(nil, true);
}
// Stops execution of the current m that is locked to a g until the g is runnable again.
// Returns with acquired P.
static void
stoplockedm(void)
{
P *p;
uint32 status;
if(g->m->lockedg == nil || g->m->lockedg->lockedm != g->m)
runtime·throw("stoplockedm: inconsistent locking");
if(g->m->p) {
// Schedule another M to run this p.
p = releasep();
handoffp(p);
}
incidlelocked(1);
// Wait until another thread schedules lockedg again.
runtime·notesleep(&g->m->park);
runtime·noteclear(&g->m->park);
status = runtime·readgstatus(g->m->lockedg);
if((status&~Gscan) != Grunnable){
runtime·printf("runtime:stoplockedm: g is not Grunnable or Gscanrunnable");
dumpgstatus(g);
runtime·throw("stoplockedm: not runnable");
}
acquirep(g->m->nextp);
g->m->nextp = nil;
}
// Schedules the locked m to run the locked gp.
static void
startlockedm(G *gp)
{
M *mp;
P *p;
mp = gp->lockedm;
if(mp == g->m)
runtime·throw("startlockedm: locked to me");
if(mp->nextp)
runtime·throw("startlockedm: m has p");
// directly handoff current P to the locked m
incidlelocked(-1);
p = releasep();
mp->nextp = p;
runtime·notewakeup(&mp->park);
stopm();
}
// Stops the current m for stoptheworld.
// Returns when the world is restarted.
static void
gcstopm(void)
{
P *p;
if(!runtime·sched.gcwaiting)
runtime·throw("gcstopm: not waiting for gc");
if(g->m->spinning) {
g->m->spinning = false;
runtime·xadd(&runtime·sched.nmspinning, -1);
}
p = releasep();
runtime·lock(&runtime·sched.lock);
p->status = Pgcstop;
if(--runtime·sched.stopwait == 0)
runtime·notewakeup(&runtime·sched.stopnote);
runtime·unlock(&runtime·sched.lock);
stopm();
}
// Schedules gp to run on the current M.
// Never returns.
static void
execute(G *gp)
{
int32 hz;
runtime·casgstatus(gp, Grunnable, Grunning);
gp->waitsince = 0;
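// Clear any pending preemption request; stackguard0 may have been left at StackPreempt.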
gp->preempt = false;
gp->stackguard0 = gp->stackguard;
g->m->p->schedtick++;
g->m->curg = gp;
gp->m = g->m;
// Check whether the profiler needs to be turned on or off.
hz = runtime·sched.profilehz;
if(g->m->profilehz != hz)
runtime·resetcpuprofiler(hz);
runtime·gogo(&gp->sched);
}
// Finds a runnable goroutine to execute.
// Tries to steal from other P's, get g from global queue, poll network.
static G*
findrunnable(void)
{
G *gp;
P *p;
int32 i;
top:
if(runtime·sched.gcwaiting) {
gcstopm();
goto top;
}
if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
runtime·ready(gp);
// local runq
gp = runqget(g->m->p);
if(gp)
return gp;
// global runq
if(runtime·sched.runqsize) {
runtime·lock(&runtime·sched.lock);
gp = globrunqget(g->m->p, 0);
runtime·unlock(&runtime·sched.lock);
if(gp)
return gp;
}
// poll network
gp = runtime·netpoll(false); // non-blocking
if(gp) {
injectglist(gp->schedlink);
runtime·casgstatus(gp, Gwaiting, Grunnable);
return gp;
}
// If number of spinning M's >= number of busy P's, block.
// This is necessary to prevent excessive CPU consumption
// when GOMAXPROCS>>1 but the program parallelism is low.
if(!g->m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle)) // TODO: fast atomic
goto stop;
if(!g->m->spinning) {
g->m->spinning = true;
runtime·xadd(&runtime·sched.nmspinning, 1);
}
// random steal from other P's
for(i = 0; i < 2*runtime·gomaxprocs; i++) {
if(runtime·sched.gcwaiting)
goto top;
p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
if(p == g->m->p)
gp = runqget(p);
else
gp = runqsteal(g->m->p, p);
if(gp)
return gp;
}
stop:
// return P and block
runtime·lock(&runtime·sched.lock);
if(runtime·sched.gcwaiting) {
runtime·unlock(&runtime·sched.lock);
goto top;
}
if(runtime·sched.runqsize) {
gp = globrunqget(g->m->p, 0);
runtime·unlock(&runtime·sched.lock);
return gp;
}
p = releasep();
pidleput(p);
runtime·unlock(&runtime·sched.lock);
if(g->m->spinning) {
g->m->spinning = false;
runtime·xadd(&runtime·sched.nmspinning, -1);
}
// check all runqueues once again
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p && p->runqhead != p->runqtail) {
runtime·lock(&runtime·sched.lock);
p = pidleget();
runtime·unlock(&runtime·sched.lock);
if(p) {
acquirep(p);
goto top;
}
break;
}
}
// poll network
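// Only one M blocks in netpoll at a time; swapping lastpoll to 0 claims the right to block here.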
if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
if(g->m->p)
runtime·throw("findrunnable: netpoll with p");
if(g->m->spinning)
runtime·throw("findrunnable: netpoll with spinning");
gp = runtime·netpoll(true); // block until new work is available
runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime());
if(gp) {
runtime·lock(&runtime·sched.lock);
p = pidleget();
runtime·unlock(&runtime·sched.lock);
if(p) {
acquirep(p);
injectglist(gp->schedlink);
runtime·casgstatus(gp, Gwaiting, Grunnable);
return gp;
}
injectglist(gp);
}
}
stopm();
goto top;
}
static void
resetspinning(void)
{
int32 nmspinning;
if(g->m->spinning) {
g->m->spinning = false;
nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
if(nmspinning < 0)
runtime·throw("findrunnable: negative nmspinning");
} else
nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
// so see if we need to wake up another P here.
if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
wakep();
}
// Injects the list of runnable G's into the scheduler.
// Can run concurrently with GC.
static void
injectglist(G *glist)
{
int32 n;
G *gp;
if(glist == nil)
return;
runtime·lock(&runtime·sched.lock);
for(n = 0; glist; n++) {
gp = glist;
glist = gp->schedlink;
runtime·casgstatus(gp, Gwaiting, Grunnable);
globrunqput(gp);
}
runtime·unlock(&runtime·sched.lock);
for(; n && runtime·sched.npidle; n--)
startm(nil, false);
}
// One round of scheduler: find a runnable goroutine and execute it.
// Never returns.
static void
schedule(void)
{
G *gp;
uint32 tick;
if(g->m->locks)
runtime·throw("schedule: holding locks");
if(g->m->lockedg) {
stoplockedm();
execute(g->m->lockedg); // Never returns.
}
top:
if(runtime·sched.gcwaiting) {
gcstopm();
goto top;
}
gp = nil;
// Check the global runnable queue once in a while to ensure fairness.
// Otherwise two goroutines can completely occupy the local runqueue
// by constantly respawning each other.
tick = g->m->p->schedtick;
// This is a fancy way to say tick%61==0,
// it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
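// Worked example (illustrative arithmetic only): 0x4325c53f/2^36 is ~1/61, so
// for tick=61, (61*0x4325c53f)>>36 = 68719476739>>36 = 1 and 61 - 1*61 == 0;
// for tick=62 the quotient is still 1 and the expression evaluates to 1, not 0.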
if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
runtime·lock(&runtime·sched.lock);
gp = globrunqget(g->m->p, 1);
runtime·unlock(&runtime·sched.lock);
if(gp)
resetspinning();
}
if(gp == nil) {
gp = runqget(g->m->p);
if(gp && g->m->spinning)
runtime·throw("schedule: spinning with local work");
}
if(gp == nil) {
gp = findrunnable(); // blocks until work is available
resetspinning();
}
if(gp->lockedm) {
// Hands off own p to the locked m,
// then blocks waiting for a new p.
startlockedm(gp);
goto top;
}
execute(gp);
}
// dropg removes the association between m and the current goroutine m->curg (gp for short).
// Typically a caller sets gp's status away from Grunning and then
// immediately calls dropg to finish the job. The caller is also responsible
// for arranging that gp will be restarted using runtime·ready at an
// appropriate time. After calling dropg and arranging for gp to be
// readied later, the caller can do other work but eventually should
// call schedule to restart the scheduling of goroutines on this m.
static void
dropg(void)
{
if(g->m->lockedg == nil) {
g->m->curg->m = nil;
g->m->curg = nil;
}
}
// Puts the current goroutine into a waiting state and calls unlockf.
// If unlockf returns false, the goroutine is resumed.
void
runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
{
void (*fn)(G*);
g->m->waitlock = lock;
g->m->waitunlockf = unlockf;
g->waitreason = reason;
fn = runtime·park_m;
runtime·mcall(&fn);
}
bool
runtime·parkunlock_c(G *gp, void *lock)
{
USED(gp);
runtime·unlock(lock);
return true;
}
// Puts the current goroutine into a waiting state and unlocks the lock.
// The goroutine can be made runnable again by calling runtime·ready(gp).
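// A hedged sketch of the usual pairing (wl is a hypothetical wait structure
// guarded by wl.lock; exact call sites differ):
//	runtime·lock(&wl.lock);
//	... record g on wl so a waker can find it ...
//	runtime·parkunlock(&wl.lock, reason);
// and later, from whatever code dequeues the goroutine:
//	runtime·ready(gp);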
void
runtime·parkunlock(Mutex *lock, String reason)
{
runtime·park(runtime·parkunlock_c, lock, reason);
}
// runtime·park continuation on g0.
void
runtime·park_m(G *gp)
{
bool ok;
runtime·casgstatus(gp, Grunning, Gwaiting);
dropg();
if(g->m->waitunlockf) {
ok = g->m->waitunlockf(gp, g->m->waitlock);
g->m->waitunlockf = nil;
g->m->waitlock = nil;
if(!ok) {
runtime·casgstatus(gp, Gwaiting, Grunnable);
execute(gp); // Schedule it back, never returns.
}
}
schedule();
}
// Scheduler yield.
void
runtime·gosched(void)
{
void (*fn)(G*);
fn = runtime·gosched_m;
runtime·mcall(&fn);
}
// runtime·gosched continuation on g0.
void
runtime·gosched_m(G *gp)
{
uint32 status;
status = runtime·readgstatus(gp);
if((status&~Gscan) != Grunning){
dumpgstatus(gp);
runtime·throw("bad g status");
}
runtime·casgstatus(gp, Grunning, Grunnable);
dropg();
runtime·lock(&runtime·sched.lock);
globrunqput(gp);
runtime·unlock(&runtime·sched.lock);
schedule();
}
// Finishes execution of the current goroutine.
// Need to mark it as nosplit, because it runs with sp > stackbase (as runtime·lessstack).
// Since it does not return it does not matter. But if it is preempted
// at the split stack check, GC will complain about inconsistent sp.
#pragma textflag NOSPLIT
void
runtime·goexit(void)
{
void (*fn)(G*);
if(raceenabled)
runtime·racegoend();
fn = goexit0;
runtime·mcall(&fn);
}
// runtime·goexit continuation on g0.
static void
goexit0(G *gp)
{
runtime·casgstatus(gp, Grunning, Gdead);
gp->m = nil;
gp->lockedm = nil;
g->m->lockedg = nil;
gp->paniconfault = 0;
gp->defer = nil; // should be nil already but just in case.
gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
gp->writebuf.array = nil;
gp->writebuf.len = 0;
gp->writebuf.cap = 0;
gp->waitreason.str = nil;
gp->waitreason.len = 0;
gp->param = nil;
dropg();
if(g->m->locked & ~LockExternal) {
runtime·printf("invalid m->locked = %d\n", g->m->locked);
runtime·throw("internal lockOSThread error");
}
g->m->locked = 0;
runtime·unwindstack(gp, nil);
gfput(g->m->p, gp);
schedule();
}
#pragma textflag NOSPLIT
static void
save(void *pc, uintptr sp)
{
g->sched.pc = (uintptr)pc;
g->sched.sp = sp;
g->sched.lr = 0;
g->sched.ret = 0;
g->sched.ctxt = 0;
g->sched.g = g;
}
static void entersyscall_bad(void);
static void entersyscall_sysmon(void);
static void entersyscall_gcwait(void);
// The goroutine g is about to enter a system call.
// Record that it's not using the cpu anymore.
// This is called only from the go syscall library and cgocall,
// not from the low-level system calls used by the runtime.
//
// Entersyscall cannot split the stack: the runtime·gosave must
// make g->sched refer to the caller's stack segment, because
// entersyscall is going to return immediately after.
//
// Nothing entersyscall calls can split the stack either.
// We cannot safely move the stack during an active call to syscall,
// because we do not know which of the uintptr arguments are
// really pointers (back into the stack).
// In practice, this means that we make the fast path run through
// entersyscall doing no-split things, and the slow path has to use onM
// to run bigger things on the m stack.
#pragma textflag NOSPLIT
void
·entersyscall(int32 dummy)
{
void (*fn)(void);
// Disable preemption because during this function g is in Gsyscall status,
// but can have inconsistent g->sched, do not let GC observe it.
g->m->locks++;
// Entersyscall must not call any function that might split/grow the stack.
// (See details in comment above.)
// Catch calls that might, by replacing the stack guard with something that
// will trip any stack check and leaving a flag to tell newstack to die.
g->stackguard0 = StackPreempt;
g->throwsplit = 1;
// Leave SP around for GC and traceback.
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
g->syscallsp = g->sched.sp;
g->syscallpc = g->sched.pc;
g->syscallstack = g->stackbase;
g->syscallguard = g->stackguard;
runtime·casgstatus(g, Grunning, Gsyscall);
if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->syscallsp) {
fn = entersyscall_bad;
runtime·onM(&fn);
}
if(runtime·atomicload(&runtime·sched.sysmonwait)) { // TODO: fast atomic
fn = entersyscall_sysmon;
runtime·onM(&fn);
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
}
g->m->mcache = nil;
g->m->p->m = nil;
runtime·atomicstore(&g->m->p->status, Psyscall);
if(runtime·sched.gcwaiting) {
fn = entersyscall_gcwait;
runtime·onM(&fn);
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
}
// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
// We set stackguard to StackPreempt so that first split stack check calls morestack.
// Morestack detects this case and throws.
g->stackguard0 = StackPreempt;
g->m->locks--;
}
static void
entersyscall_bad(void)
{
G *gp;
gp = g->m->curg;
runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
gp->syscallsp, gp->syscallguard-StackGuard, gp->syscallstack);
runtime·throw("entersyscall");
}
static void
entersyscall_sysmon(void)
{
runtime·lock(&runtime·sched.lock);
if(runtime·atomicload(&runtime·sched.sysmonwait)) {
runtime·atomicstore(&runtime·sched.sysmonwait, 0);
runtime·notewakeup(&runtime·sched.sysmonnote);
}
runtime·unlock(&runtime·sched.lock);
}
static void
entersyscall_gcwait(void)
{
runtime·lock(&runtime·sched.lock);
if (runtime·sched.stopwait > 0 && runtime·cas(&g->m->p->status, Psyscall, Pgcstop)) {
if(--runtime·sched.stopwait == 0)
runtime·notewakeup(&runtime·sched.stopnote);
}
runtime·unlock(&runtime·sched.lock);
}
static void entersyscallblock_handoff(void);
// The same as runtime·entersyscall(), but with a hint that the syscall is blocking.
#pragma textflag NOSPLIT
void
·entersyscallblock(int32 dummy)
{
void (*fn)(void);
g->m->locks++; // see comment in entersyscall
g->throwsplit = 1;
g->stackguard0 = StackPreempt; // see comment in entersyscall
// Leave SP around for GC and traceback.
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
g->syscallsp = g->sched.sp;
g->syscallpc = g->sched.pc;
g->syscallstack = g->stackbase;
g->syscallguard = g->stackguard;
runtime·casgstatus(g, Grunning, Gsyscall);
if(g->syscallsp < g->syscallguard-StackGuard || g->syscallstack < g->syscallsp) {
fn = entersyscall_bad;
runtime·onM(&fn);
}
fn = entersyscallblock_handoff;
runtime·onM(&fn);
// Resave for traceback during blocked call.
save(runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
g->m->locks--;
}
static void
entersyscallblock_handoff(void)
{
handoffp(releasep());
}
// The goroutine g exited its system call.
// Arrange for it to run on a cpu again.
// This is called only from the go syscall library, not
// from the low-level system calls used by the runtime.
#pragma textflag NOSPLIT
void
runtime·exitsyscall(void)
{
void (*fn)(G*);
g->m->locks++; // see comment in entersyscall
g->waitsince = 0;
if(exitsyscallfast()) {
// There's a cpu for us, so we can run.
g->m->p->syscalltick++;
// We need to cas the status and scan before resuming...
runtime·casgstatus(g, Gsyscall, Grunning);
// Garbage collector isn't running (since we are),
// so okay to clear gcstack and gcsp.
g->syscallstack = (uintptr)nil;
g->syscallsp = (uintptr)nil;
g->m->locks--;
if(g->preempt) {
// restore the preemption request in case we've cleared it in newstack
g->stackguard0 = StackPreempt;
} else {
// otherwise restore the real stackguard, we've spoiled it in entersyscall/entersyscallblock
g->stackguard0 = g->stackguard;
}
g->throwsplit = 0;
return;
}
g->m->locks--;
// Call the scheduler.
fn = exitsyscall0;
runtime·mcall(&fn);
// Scheduler returned, so we're allowed to run now.
// Delete the gcstack information that we left for
// the garbage collector during the system call.
// Must wait until now because until gosched returns
// we don't know for sure that the garbage collector
// is not running.
g->syscallstack = (uintptr)nil;
g->syscallsp = (uintptr)nil;
g->m->p->syscalltick++;
g->throwsplit = 0;
}
static void exitsyscallfast_pidle(void);
#pragma textflag NOSPLIT
static bool
exitsyscallfast(void)
{
void (*fn)(void);
// Freezetheworld sets stopwait but does not retake P's.
if(runtime·sched.stopwait) {
g->m->p = nil;
return false;
}
// Try to re-acquire the last P.
if(g->m->p && g->m->p->status == Psyscall && runtime·cas(&g->m->p->status, Psyscall, Prunning)) {
// There's a cpu for us, so we can run.
g->m->mcache = g->m->p->mcache;
g->m->p->m = g->m;
return true;
}
// Try to get any other idle P.
g->m->p = nil;
if(runtime·sched.pidle) {
fn = exitsyscallfast_pidle;
runtime·onM(&fn);
if(g->m->scalararg[0]) {
g->m->scalararg[0] = 0;
return true;
}
}
return false;
}
static void
exitsyscallfast_pidle(void)
{
P *p;
runtime·lock(&runtime·sched.lock);
p = pidleget();
if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
runtime·atomicstore(&runtime·sched.sysmonwait, 0);
runtime·notewakeup(&runtime·sched.sysmonnote);
}
runtime·unlock(&runtime·sched.lock);
if(p) {
acquirep(p);
g->m->scalararg[0] = 1;
} else
g->m->scalararg[0] = 0;
}
// runtime·exitsyscall slow path on g0.
// Failed to acquire P, enqueue gp as runnable.
static void
exitsyscall0(G *gp)
{
P *p;
runtime·casgstatus(gp, Gsyscall, Grunnable);
dropg();
runtime·lock(&runtime·sched.lock);
p = pidleget();
if(p == nil)
globrunqput(gp);
else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
runtime·atomicstore(&runtime·sched.sysmonwait, 0);
runtime·notewakeup(&runtime·sched.sysmonnote);
}
runtime·unlock(&runtime·sched.lock);
if(p) {
acquirep(p);
execute(gp); // Never returns.
}
if(g->m->lockedg) {
// Wait until another thread schedules gp and so m again.
stoplockedm();
execute(gp); // Never returns.
}
stopm();
schedule(); // Never returns.
}
static void
beforefork(void)
{
G *gp;
gp = g->m->curg;
// Fork can hang if preempted with signals frequently enough (see issue 5517).
// Ensure that we stay on the same M where we disable profiling.
gp->m->locks++;
if(gp->m->profilehz != 0)
runtime·resetcpuprofiler(0);
// This function is called before fork in syscall package.
// Code between fork and exec must not allocate memory nor even try to grow stack.
// Here we spoil g->stackguard to reliably detect any attempts to grow stack.
// runtime_AfterFork will undo this in parent process, but not in child.
gp->m->forkstackguard = gp->stackguard;
gp->stackguard0 = StackPreempt-1;
gp->stackguard = StackPreempt-1;
}
// Called from syscall package before fork.
#pragma textflag NOSPLIT
void
syscall·runtime_BeforeFork(void)
{
void (*fn)(void);
fn = beforefork;
runtime·onM(&fn);
}
static void
afterfork(void)
{
int32 hz;
G *gp;
gp = g->m->curg;
// See the comment in runtime_BeforeFork.
gp->stackguard0 = gp->m->forkstackguard;
gp->stackguard = gp->m->forkstackguard;
gp->m->forkstackguard = 0;
hz = runtime·sched.profilehz;
if(hz != 0)
runtime·resetcpuprofiler(hz);
gp->m->locks--;
}
// Called from syscall package after fork in parent.
#pragma textflag NOSPLIT
void
syscall·runtime_AfterFork(void)
{
void (*fn)(void);
fn = afterfork;
runtime·onM(&fn);
}
// Hook used by runtime·malg to call runtime·stackalloc on the
// scheduler stack. This exists because runtime·stackalloc insists
// on being called on the scheduler stack, to avoid trying to grow
// the stack while allocating a new stack segment.
static void
mstackalloc(G *gp)
{
G *newg;
uintptr size;
newg = (G*)gp->param;
size = newg->stacksize;
newg->stacksize = 0;
gp->param = runtime·stackalloc(newg, size);
runtime·gogo(&gp->sched);
}
// Allocate a new g, with a stack big enough for stacksize bytes.
G*
runtime·malg(int32 stacksize)
{
G *newg;
byte *stk;
void (*fn)(G*);
if(StackTop < sizeof(Stktop)) {
runtime·printf("runtime: SizeofStktop=%d, should be >=%d\n", (int32)StackTop, (int32)sizeof(Stktop));
runtime·throw("runtime: bad stack.h");
}
newg = allocg();
if(stacksize >= 0) {
stacksize = runtime·round2(StackSystem + stacksize);
if(g == g->m->g0) {
// running on scheduler stack already.
stk = runtime·stackalloc(newg, stacksize);
} else {
// have to call stackalloc on scheduler stack.
newg->stacksize = stacksize;
g->param = newg;
fn = mstackalloc;
runtime·mcall(&fn);
stk = g->param;
g->param = nil;
}
newg->stack0 = (uintptr)stk;
newg->stackguard = (uintptr)stk + StackGuard;
newg->stackguard0 = newg->stackguard;
newg->stackbase = (uintptr)stk + stacksize - sizeof(Stktop);
}
return newg;
}
static void
newproc_m(void)
{
byte *argp;
void *callerpc;
FuncVal *fn;
int32 siz;
siz = g->m->scalararg[0];
callerpc = (void*)g->m->scalararg[1];
argp = g->m->ptrarg[0];
fn = (FuncVal*)g->m->ptrarg[1];
runtime·newproc1(fn, argp, siz, 0, callerpc);
g->m->ptrarg[0] = nil;
g->m->ptrarg[1] = nil;
}
// Create a new g running fn with siz bytes of arguments.
// Put it on the queue of g's waiting to run.
// The compiler turns a go statement into a call to this.
// Cannot split the stack because it assumes that the arguments
// are available sequentially after &fn; they would not be
// copied if a stack split occurred.
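// For example (a rough sketch of the lowering, not the exact generated code),
//	go f(x, y)
// becomes approximately
//	runtime·newproc(siz, f, x, y);
// where siz is the byte size of the argument frame holding x and y.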
#pragma textflag NOSPLIT
void
runtime·newproc(int32 siz, FuncVal* fn, ...)
{
byte *argp;
void (*mfn)(void);
if(thechar == '5')
argp = (byte*)(&fn+2); // skip caller's saved LR
else
argp = (byte*)(&fn+1);
g->m->locks++;
g->m->scalararg[0] = siz;
g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
g->m->ptrarg[0] = argp;
g->m->ptrarg[1] = fn;
mfn = newproc_m;
runtime·onM(&mfn);
g->m->locks--;
}
// Create a new g running fn with narg bytes of arguments starting
// at argp and returning nret bytes of results. callerpc is the
// address of the go statement that created this. The new g is put
// on the queue of g's waiting to run.
G*
runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
{
byte *sp;
G *newg;
P *p;
int32 siz;
if(fn == nil) {
g->m->throwing = -1; // do not dump full stacks
runtime·throw("go of nil func value");
}
g->m->locks++; // disable preemption because it can be holding p in a local var
siz = narg + nret;
siz = (siz+7) & ~7;
// We could instead create a secondary stack frame
// and make it look like goexit was on the original but
// the call to the actual goroutine function was split.
// Not worth it: this is almost always an error.
if(siz > StackMin - 1024)
runtime·throw("runtime.newproc: function arguments too large for new goroutine");
p = g->m->p;
if((newg = gfget(p)) != nil) {
if(newg->stackguard - StackGuard != newg->stack0)
runtime·throw("invalid stack in newg");
} else {
newg = runtime·malg(StackMin);
runtime·casgstatus(newg, Gidle, Gdead);
allgadd(newg); // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
}
if(runtime·readgstatus(newg) != Gdead)
runtime·throw("newproc1: new g is not Gdead");
sp = (byte*)newg->stackbase;
sp -= siz;
runtime·memmove(sp, argp, narg);
if(thechar == '5') {
// caller's LR
sp -= sizeof(void*);
*(void**)sp = nil;
}
runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
newg->sched.sp = (uintptr)sp;
newg->sched.pc = (uintptr)runtime·goexit;
newg->sched.g = newg;
runtime·gostartcallfn(&newg->sched, fn);
newg->gopc = (uintptr)callerpc;
runtime·casgstatus(newg, Gdead, Grunnable);
if(p->goidcache == p->goidcacheend) {
// Sched.goidgen is the last allocated id,
// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
// At startup sched.goidgen=0, so main goroutine receives goid=1.
p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
p->goidcache -= GoidCacheBatch - 1;
p->goidcacheend = p->goidcache + GoidCacheBatch;
}
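// For example, assuming GoidCacheBatch is 16: the first refill moves
// sched.goidgen from 0 to 16 and leaves goidcache=1, goidcacheend=17,
// so this P hands out goids 1..16 before refilling again.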
newg->goid = p->goidcache++;
newg->panicwrap = 0;
if(raceenabled)
newg->racectx = runtime·racegostart((void*)callerpc);
runqput(p, newg);
if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main) // TODO: fast atomic
wakep();
g->m->locks--;
if(g->m->locks == 0 && g->preempt) // restore the preemption request in case we've cleared it in newstack
g->stackguard0 = StackPreempt;
return newg;
}
static void
allgadd(G *gp)
{
G **new;
uintptr cap;
if(runtime·readgstatus(gp) == Gidle)
runtime·throw("allgadd: bad status Gidle");
runtime·lock(&allglock);
if(runtime·allglen >= allgcap) {
cap = 4096/sizeof(new[0]);
if(cap < 2*allgcap)
cap = 2*allgcap;
new = runtime·mallocgc(cap*sizeof(new[0]), nil, 0);
if(new == nil)
runtime·throw("runtime: cannot allocate memory");
if(runtime·allg != nil)
runtime·memmove(new, runtime·allg, runtime·allglen*sizeof(new[0]));
runtime·allg = new;
runtime·allgs.array = (void*)runtime·allg;
allgcap = cap;
runtime·allgs.cap = allgcap;
}
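// For example, on a 64-bit system (8-byte pointers) the first growth makes
// room for 512 G's and later growths double that: 1024, 2048, and so on.
// (Illustrative sizes; the minimum allocation is 4096 bytes of pointers.)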
runtime·allg[runtime·allglen++] = gp;
runtime·allgs.len = runtime·allglen;
runtime·unlock(&allglock);
}
// Put on gfree list.
// If local list is too long, transfer a batch to the global list.
static void
gfput(P *p, G *gp)
{
uintptr stksize;
Stktop *top;
if(runtime·readgstatus(gp) != Gdead)
runtime·throw("gfput: bad status (not Gdead)");
if(gp->stackguard - StackGuard != gp->stack0)
runtime·throw("invalid stack in gfput");
stksize = gp->stackbase + sizeof(Stktop) - gp->stack0;
if(stksize != gp->stacksize) {
runtime·printf("runtime: bad stacksize, goroutine %D, remain=%d, last=%d\n",
gp->goid, (int32)gp->stacksize, (int32)stksize);
runtime·throw("gfput: bad stacksize");
}
top = (Stktop*)gp->stackbase;
if(stksize != FixedStack) {
// non-standard stack size - free it.
runtime·stackfree(gp, (void*)gp->stack0, top);
gp->stack0 = 0;
gp->stackguard = 0;
gp->stackguard0 = 0;
gp->stackbase = 0;
}
gp->schedlink = p->gfree;
p->gfree = gp;
p->gfreecnt++;
if(p->gfreecnt >= 64) {
runtime·lock(&runtime·sched.gflock);
while(p->gfreecnt >= 32) {
p->gfreecnt--;
gp = p->gfree;
p->gfree = gp->schedlink;
gp->schedlink = runtime·sched.gfree;
runtime·sched.gfree = gp;
runtime·sched.ngfree++;
}
runtime·unlock(&runtime·sched.gflock);
}
}
// Get from gfree list.
// If local list is empty, grab a batch from global list.
static G*
gfget(P *p)
{
G *gp;
byte *stk;
void (*fn)(G*);
retry:
gp = p->gfree;
if(gp == nil && runtime·sched.gfree) {
runtime·lock(&runtime·sched.gflock);
while(p->gfreecnt < 32 && runtime·sched.gfree != nil) {
p->gfreecnt++;
gp = runtime·sched.gfree;
runtime·sched.gfree = gp->schedlink;
runtime·sched.ngfree--;
gp->schedlink = p->gfree;
p->gfree = gp;
}
runtime·unlock(&runtime·sched.gflock);
goto retry;
}
if(gp) {
p->gfree = gp->schedlink;
p->gfreecnt--;
if(gp->stack0 == 0) {
// Stack was deallocated in gfput. Allocate a new one.
if(g == g->m->g0) {
stk = runtime·stackalloc(gp, FixedStack);
} else {
gp->stacksize = FixedStack;
g->param = gp;
fn = mstackalloc;
runtime·mcall(&fn);
stk = g->param;
g->param = nil;
}
gp->stack0 = (uintptr)stk;
gp->stackbase = (uintptr)stk + FixedStack - sizeof(Stktop);
gp->stackguard = (uintptr)stk + StackGuard;
gp->stackguard0 = gp->stackguard;
} else {
if(raceenabled)
runtime·racemalloc((void*)gp->stack0, gp->stackbase + sizeof(Stktop) - gp->stack0);
}
}
return gp;
}
// Purge all cached G's from gfree list to the global list.
static void
gfpurge(P *p)
{
G *gp;
runtime·lock(&runtime·sched.gflock);
while(p->gfreecnt != 0) {
p->gfreecnt--;
gp = p->gfree;
p->gfree = gp->schedlink;
gp->schedlink = runtime·sched.gfree;
runtime·sched.gfree = gp;
runtime·sched.ngfree++;
}
runtime·unlock(&runtime·sched.gflock);
}
void
runtime·Breakpoint(void)
{
runtime·breakpoint();
}
// Implementation of runtime.GOMAXPROCS.
// delete when scheduler is even stronger
void
runtime·gomaxprocs_m(void)
{
int32 n, ret;
n = g->m->scalararg[0];
g->m->scalararg[0] = 0;
if(n > MaxGomaxprocs)
n = MaxGomaxprocs;
runtime·lock(&runtime·sched.lock);
ret = runtime·gomaxprocs;
if(n <= 0 || n == ret) {
runtime·unlock(&runtime·sched.lock);
g->m->scalararg[0] = ret;
return;
}
runtime·unlock(&runtime·sched.lock);
runtime·semacquire(&runtime·worldsema, false);
g->m->gcing = 1;
runtime·stoptheworld();
newprocs = n;
g->m->gcing = 0;
runtime·semrelease(&runtime·worldsema);
runtime·starttheworld();
g->m->scalararg[0] = ret;
return;
}
// lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
// after they modify m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
#pragma textflag NOSPLIT
static void
lockOSThread(void)
{
g->m->lockedg = g;
g->lockedm = g->m;
}
#pragma textflag NOSPLIT
void
runtime·LockOSThread(void)
{
g->m->locked |= LockExternal;
lockOSThread();
}
#pragma textflag NOSPLIT
void
runtime·lockOSThread(void)
{
g->m->locked += LockInternal;
lockOSThread();
}
// unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
// after they update m->locked. Do not allow preemption during this call,
// or else the m might be different in this function than in the caller.
#pragma textflag NOSPLIT
static void
unlockOSThread(void)
{
if(g->m->locked != 0)
return;
g->m->lockedg = nil;
g->lockedm = nil;
}
#pragma textflag NOSPLIT
void
runtime·UnlockOSThread(void)
{
g->m->locked &= ~LockExternal;
unlockOSThread();
}
static void badunlockOSThread(void);
#pragma textflag NOSPLIT
void
runtime·unlockOSThread(void)
{
void (*fn)(void);
if(g->m->locked < LockInternal) {
fn = badunlockOSThread;
runtime·onM(&fn);
}
g->m->locked -= LockInternal;
unlockOSThread();
}
static void
badunlockOSThread(void)
{
runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
}
bool
runtime·lockedOSThread(void)
{
return g->lockedm != nil && g->m->lockedg != nil;
}
#pragma textflag NOSPLIT
int32
runtime·gcount(void)
{
P *p, **pp;
int32 n;
n = runtime·allglen - runtime·sched.ngfree;
for(pp=runtime·allp; p=*pp; pp++)
n -= p->gfreecnt;
// All these variables can be changed concurrently, so the result can be inconsistent.
// But at least the current goroutine is running.
if(n < 1)
n = 1;
return n;
}
int32
runtime·mcount(void)
{
return runtime·sched.mcount;
}
void
runtime·badreflectcall(void) // called from assembly
{
runtime·panicstring("runtime: arg size to reflect.call more than 1GB");
}
static struct {
Mutex lock;
int32 hz;
} prof;
static void System(void) {}
static void ExternalCode(void) {}
static void GC(void) {}
extern void runtime·cpuproftick(uintptr*, int32);
extern byte runtime·etext[];
// Called if we receive a SIGPROF signal.
void
runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
{
int32 n;
bool traceback;
// Do not use global m in this function, use mp instead.
// On windows one m is sending reports about all the g's, so the global m would be the wrong one.
byte m;
uintptr stk[100];
m = 0;
USED(m);
if(prof.hz == 0)
return;
// Profiling runs concurrently with GC, so it must not allocate.
mp->mallocing++;
// Define that a "user g" is a user-created goroutine, and a "system g"
// is one that is m->g0 or m->gsignal. We've only made sure that we
// can unwind user g's, so exclude the system g's.
//
// It is not quite as easy as testing gp == m->curg (the current user g)
// because we might be interrupted for profiling halfway through a
// goroutine switch. The switch involves updating three (or four) values:
// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
// because once it gets updated the new g is running.
//
// When switching from a user g to a system g, LR is not considered live,
// so the update only affects g, SP, and PC. Since PC must be last, the
// possible partial transitions in ordinary execution are (1) g alone is updated,
// (2) both g and SP are updated, and (3) SP alone is updated.
// If g is updated, we'll see a system g and not look closer.
// If SP alone is updated, we can detect the partial transition by checking
// whether the SP is within g's stack bounds. (We could also require that SP
// be changed only after g, but the stack bounds check is needed by other
// cases, so there is no need to impose an additional requirement.)
//
// There is one exceptional transition to a system g, not in ordinary execution.
// When a signal arrives, the operating system starts the signal handler running
// with an updated PC and SP. The g is updated last, at the beginning of the
// handler. There are two reasons this is okay. First, until g is updated the
// g and SP do not match, so the stack bounds check detects the partial transition.
// Second, signal handlers currently run with signals disabled, so a profiling
// signal cannot arrive during the handler.
//
// When switching from a system g to a user g, there are three possibilities.
//
// First, it may be that the g switch has no PC update, because the SP
// either corresponds to a user g throughout (as in runtime.asmcgocall)
// or because it has been arranged to look like a user g frame
// (as in runtime.cgocallback_gofunc). In this case, since the entire
// transition is a g+SP update, a partial transition updating just one of
// those will be detected by the stack bounds check.
//
// Second, when returning from a signal handler, the PC and SP updates
// are performed by the operating system in an atomic update, so the g
// update must be done before them. The stack bounds check detects
// the partial transition here, and (again) signal handlers run with signals
// disabled, so a profiling signal cannot arrive then anyway.
//
// Third, the common case: it may be that the switch updates g, SP, and PC
// separately, as in runtime.gogo.
//
// Because runtime.gogo is the only instance, we check whether the PC lies
// within that function, and if so, we do not ask for a traceback. This approach
// requires knowing the size of the runtime.gogo function, which we
// record in arch_*.h and check in runtime_test.go.
//
// There is another apparently viable approach, recorded here in case
// the "PC within runtime.gogo" check turns out not to be usable.
// It would be possible to delay the update of either g or SP until immediately
// before the PC update instruction. Then, because of the stack bounds check,
// the only problematic interrupt point is just before that PC update instruction,
// and the sigprof handler can detect that instruction and simulate stepping past
// it in order to reach a consistent state. On ARM, the update of g must be made
// in two places (in R10 and also in a TLS slot), so the delayed update would
// need to be the SP update. The sigprof handler must read the instruction at
// the current PC and if it was the known instruction (for example, JMP BX or
// MOV R2, PC), use that other register in place of the PC value.
// The biggest drawback to this solution is that it requires that we can tell
// whether it's safe to read from the memory pointed at by PC.
// In a correct program, we can test PC == nil and otherwise read,
// but if a profiling signal happens at the instant that a program executes
// a bad jump (before the program manages to handle the resulting fault)
// the profiling handler could fault trying to read nonexistent memory.
//
// To recap, there are no constraints on the assembly being used for the
// transition. We simply require that g and SP match and that the PC is not
// in runtime.gogo.
traceback = true;
if(gp == nil || gp != mp->curg ||
(uintptr)sp < gp->stackguard - StackGuard || gp->stackbase < (uintptr)sp ||
((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
traceback = false;
n = 0;
if(traceback)
n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, stk, nelem(stk), nil, nil, false);
if(!traceback || n <= 0) {
// Normal traceback is impossible or has failed.
// See if it falls into several common cases.
n = 0;
if(mp->ncgo > 0 && mp->curg != nil &&
mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
// During a cgo call we can't unwind and symbolize arbitrary C code,
// so instead collect Go stack that leads to the cgo call.
// This is especially important on windows, since all syscalls are cgo calls.
n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, stk, nelem(stk), nil, nil, false);
}
#ifdef GOOS_windows
if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
// Libcall, i.e. runtime syscall on windows.
// Collect Go stack that leads to the call.
n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, stk, nelem(stk), nil, nil, false);
}
#endif
if(n == 0) {
// If all of the above has failed, account it against abstract "System" or "GC".
n = 2;
// "ExternalCode" is better than "etext".
if((uintptr)pc > (uintptr)runtime·etext)
pc = (byte*)ExternalCode + PCQuantum;
stk[0] = (uintptr)pc;
if(mp->gcing || mp->helpgc)
stk[1] = (uintptr)GC + PCQuantum;
else
stk[1] = (uintptr)System + PCQuantum;
}
}
if(prof.hz != 0) {
runtime·lock(&prof.lock);
if(prof.hz != 0)
runtime·cpuproftick(stk, n);
runtime·unlock(&prof.lock);
}
mp->mallocing--;
}
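// Editor's sketch (illustration only, not part of the original source): the
// "consistent user g" test that the comment above derives, pulled out as a
// standalone predicate. The name usergstack is hypothetical; sigprof above
// inlines exactly these checks when deciding whether to ask for a traceback.
static bool
usergstack(G *gp, M *mp, uint8 *pc, uint8 *sp)
{
	if(gp == nil || gp != mp->curg)
		return false;	// not the current user g at all
	if((uintptr)sp < gp->stackguard - StackGuard || gp->stackbase < (uintptr)sp)
		return false;	// SP outside gp's stack: caught mid-switch
	if((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes)
		return false;	// inside runtime.gogo: g/SP/PC may be inconsistent
	return true;		// safe to unwind gp's stack
}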
// Arrange for the CPU profiler to collect a traceback hz times a second.
// The requested hz is passed in g->m->scalararg[0].
void
runtime·setcpuprofilerate_m(void)
{
int32 hz;
hz = g->m->scalararg[0];
g->m->scalararg[0] = 0;
// Force sane arguments.
if(hz < 0)
hz = 0;
// Disable preemption, otherwise we can be rescheduled to another thread
// that has profiling enabled.
g->m->locks++;
// Stop the profiler on this thread so that it is safe to lock prof:
// if a profiling signal came in while we had prof locked,
// it would deadlock.
runtime·resetcpuprofiler(0);
runtime·lock(&prof.lock);
prof.hz = hz;
runtime·unlock(&prof.lock);
runtime·lock(&runtime·sched.lock);
runtime·sched.profilehz = hz;
runtime·unlock(&runtime·sched.lock);
if(hz != 0)
runtime·resetcpuprofiler(hz);
g->m->locks--;
}
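// Editor's sketch (assumption, not the actual runtime entry point): the "_m"
// suffix above follows this file's convention of passing arguments through
// g->m->scalararg and running the function on the m (g0) stack. A caller
// would stash hz first; the dispatch helper itself is not shown in this file,
// so it is elided below.
static void
examplesetprofilerate(int32 hz)
{
	g->m->scalararg[0] = hz;
	// ... switch to the g0 stack and run runtime·setcpuprofilerate_m ...
}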
// Change number of processors. The world is stopped, sched is locked.
static void
procresize(int32 new)
{
int32 i, old;
bool empty;
G *gp;
P *p;
old = runtime·gomaxprocs;
if(old < 0 || old > MaxGomaxprocs || new <= 0 || new > MaxGomaxprocs)
runtime·throw("procresize: invalid arg");
// initialize new P's
for(i = 0; i < new; i++) {
p = runtime·allp[i];
if(p == nil) {
p = (P*)runtime·mallocgc(sizeof(*p), 0, 0);
p->id = i;
p->status = Pgcstop;
runtime·atomicstorep(&runtime·allp[i], p);
}
if(p->mcache == nil) {
if(old==0 && i==0)
p->mcache = g->m->mcache; // bootstrap
else
p->mcache = runtime·allocmcache();
}
}
// redistribute runnable G's evenly
// collect all runnable goroutines in global queue preserving FIFO order
// FIFO order is required to ensure fairness even during frequent GCs
// see http://golang.org/issue/7126
empty = false;
while(!empty) {
empty = true;
for(i = 0; i < old; i++) {
p = runtime·allp[i];
if(p->runqhead == p->runqtail)
continue;
empty = false;
// pop from tail of local queue
p->runqtail--;
gp = p->runq[p->runqtail%nelem(p->runq)];
// push onto head of global queue
gp->schedlink = runtime·sched.runqhead;
runtime·sched.runqhead = gp;
if(runtime·sched.runqtail == nil)
runtime·sched.runqtail = gp;
runtime·sched.runqsize++;
}
}
// fill local queues with at most nelem(p->runq)/2 goroutines
// start at 1 because current M already executes some G and will acquire allp[0] below,
// so if we have a spare G we want to put it into allp[1].
for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
gp = runtime·sched.runqhead;
runtime·sched.runqhead = gp->schedlink;
if(runtime·sched.runqhead == nil)
runtime·sched.runqtail = nil;
runtime·sched.runqsize--;
runqput(runtime·allp[i%new], gp);
}
// free unused P's
for(i = new; i < old; i++) {
p = runtime·allp[i];
runtime·freemcache(p->mcache);
p->mcache = nil;
gfpurge(p);
p->status = Pdead;
// can't free P itself because it can be referenced by an M in syscall
}
if(g->m->p)
g->m->p->m = nil;
g->m->p = nil;
g->m->mcache = nil;
p = runtime·allp[0];
p->m = nil;
p->status = Pidle;
acquirep(p);
for(i = new-1; i > 0; i--) {
p = runtime·allp[i];
p->status = Pidle;
pidleput(p);
}
runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
}
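// Editor's sketch (hypothetical caller, not the runtime's actual GOMAXPROCS
// path): procresize requires that the world is already stopped and that
// sched.lock is held for the duration of the resize.
static void
resizeprocs(int32 n)
{
	// caller must have stopped the world before getting here
	runtime·lock(&runtime·sched.lock);
	procresize(n);
	runtime·unlock(&runtime·sched.lock);
	// caller restarts the world afterwards
}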
// Associate p and the current m.
static void
acquirep(P *p)
{
if(g->m->p || g->m->mcache)
runtime·throw("acquirep: already in go");
if(p->m || p->status != Pidle) {
runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
runtime·throw("acquirep: invalid p state");
}
g->m->mcache = p->mcache;
g->m->p = p;
p->m = g->m;
p->status = Prunning;
}
// Disassociate p and the current m.
static P*
releasep(void)
{
P *p;
if(g->m->p == nil || g->m->mcache == nil)
runtime·throw("releasep: invalid arg");
p = g->m->p;
if(p->m != g->m || p->mcache != g->m->mcache || p->status != Prunning) {
runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
g->m, g->m->p, p->m, g->m->mcache, p->mcache, p->status);
runtime·throw("releasep: invalid p state");
}
g->m->p = nil;
g->m->mcache = nil;
p->m = nil;
p->status = Pidle;
return p;
}
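// Editor's sketch (hypothetical, for illustration): the pairing discipline
// for acquirep/releasep. An M that is about to block for a long time hands
// its P to another M via handoffp (used elsewhere in this file), and must
// reacquire a P, directly or through the scheduler, before running Go code
// again.
static void
exampleblocking(void)
{
	P *p;

	p = releasep();	// this M keeps running, but now has no P
	handoffp(p);	// let another M run goroutines on the released P
	// ... block in the syscall or wait ...
	// on return, a P must be reacquired (acquirep) before touching
	// p->mcache or executing any goroutine.
}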
static void
incidlelocked(int32 v)
{
runtime·lock(&runtime·sched.lock);
runtime·sched.nmidlelocked += v;
if(v > 0)
checkdead();
runtime·unlock(&runtime·sched.lock);
}
// Check for a deadlock situation.
// The check is based on the number of running M's: if it is 0, we have a deadlock.
static void
checkdead(void)
{
G *gp;
int32 run, grunning, s;
uintptr i;
// -1 for sysmon
run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
if(run > 0)
return;
// If we are dying because of a signal caught on an already idle thread,
// freezetheworld will cause all running threads to block.
// And runtime will essentially enter into deadlock state,
// except that there is a thread that will call runtime·exit soon.
if(runtime·panicking > 0)
return;
if(run < 0) {
runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
runtime·throw("checkdead: inconsistent counts");
}
grunning = 0;
runtime·lock(&allglock);
for(i = 0; i < runtime·allglen; i++) {
gp = runtime·allg[i];
if(gp->issystem)
continue;
s = runtime·readgstatus(gp);
switch(s&~Gscan) {
case Gwaiting:
grunning++;
break;
case Grunnable:
case Grunning:
case Gsyscall:
runtime·unlock(&allglock);
runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
runtime·throw("checkdead: runnable g");
break;
}
}
runtime·unlock(&allglock);
if(grunning == 0) // possible if main goroutine calls runtime·Goexit()
runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
g->m->throwing = -1; // do not dump full stacks
runtime·throw("all goroutines are asleep - deadlock!");
}
static void
sysmon(void)
{
uint32 idle, delay, nscavenge;
int64 now, unixnow, lastpoll, lasttrace, lastgc;
int64 forcegcperiod, scavengelimit, lastscavenge, maxsleep;
G *gp;
// If we go two minutes without a garbage collection, force one to run.
forcegcperiod = 2*60*1e9;
// If a heap span goes unused for 5 minutes after a garbage collection,
// we hand it back to the operating system.
scavengelimit = 5*60*1e9;
if(runtime·debug.scavenge > 0) {
// Scavenge-a-lot for testing.
forcegcperiod = 10*1e6;
scavengelimit = 20*1e6;
}
lastscavenge = runtime·nanotime();
nscavenge = 0;
// Make wake-up period small enough for the sampling to be correct.
maxsleep = forcegcperiod/2;
if(scavengelimit < forcegcperiod)
maxsleep = scavengelimit/2;
lasttrace = 0;
idle = 0; // how many cycles in succession we have not woken somebody up
delay = 0;
for(;;) {
if(idle == 0) // start with 20us sleep...
delay = 20;
else if(idle > 50) // start doubling the sleep after 1ms...
delay *= 2;
if(delay > 10*1000) // up to 10ms
delay = 10*1000;
runtime·usleep(delay);
if(runtime·debug.schedtrace <= 0 &&
(runtime·sched.gcwaiting || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs)) { // TODO: fast atomic
runtime·lock(&runtime·sched.lock);
if(runtime·atomicload(&runtime·sched.gcwaiting) || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs) {
runtime·atomicstore(&runtime·sched.sysmonwait, 1);
runtime·unlock(&runtime·sched.lock);
runtime·notetsleep(&runtime·sched.sysmonnote, maxsleep);
runtime·lock(&runtime·sched.lock);
runtime·atomicstore(&runtime·sched.sysmonwait, 0);
runtime·noteclear(&runtime·sched.sysmonnote);
idle = 0;
delay = 20;
}
runtime·unlock(&runtime·sched.lock);
}
// poll network if not polled for more than 10ms
lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
now = runtime·nanotime();
unixnow = runtime·unixnanotime();
if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
gp = runtime·netpoll(false); // non-blocking
if(gp) {
// Need to decrement number of idle locked M's
// (pretending that one more is running) before injectglist.
// Otherwise it can lead to the following situation:
// injectglist grabs all P's but before it starts M's to run the P's,
// another M returns from syscall, finishes running its G,
// observes that there is no work to do and no other running M's
// and reports deadlock.
incidlelocked(-1);
injectglist(gp);
incidlelocked(1);
}
}
// retake P's blocked in syscalls
// and preempt long running G's
if(retake(now))
idle = 0;
else
idle++;
// check if we need to force a GC
lastgc = runtime·atomicload64(&mstats.last_gc);
if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
runtime·lock(&runtime·forcegc.lock);
runtime·forcegc.idle = 0;
runtime·forcegc.g->schedlink = nil;
injectglist(runtime·forcegc.g);
runtime·unlock(&runtime·forcegc.lock);
}
// scavenge heap once in a while
if(lastscavenge + scavengelimit/2 < now) {
runtime·MHeap_Scavenge(nscavenge, now, scavengelimit);
lastscavenge = now;
nscavenge++;
}
if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.schedtrace*1000000ll <= now) {
lasttrace = now;
runtime·schedtrace(runtime·debug.scheddetail);
}
}
}
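// Editor's sketch (restating the inline logic above as a pure helper, for
// clarity only): sysmon's sleep schedule. It polls every 20us while it keeps
// finding work, starts doubling the sleep after ~1ms worth of idle cycles,
// and never sleeps longer than 10ms between checks.
static uint32
sysmondelay(uint32 idle, uint32 delay)
{
	if(idle == 0)		// found work last cycle: go back to fast polling
		delay = 20;	// 20us
	else if(idle > 50)	// idle for more than 50 cycles (~1ms)
		delay *= 2;	// back off exponentially
	if(delay > 10*1000)	// cap the sleep at 10ms
		delay = 10*1000;
	return delay;
}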
typedef struct Pdesc Pdesc;
struct Pdesc
{
uint32 schedtick;
int64 schedwhen;
uint32 syscalltick;
int64 syscallwhen;
};
#pragma dataflag NOPTR
static Pdesc pdesc[MaxGomaxprocs];
static uint32
retake(int64 now)
{
uint32 i, s, n;
int64 t;
P *p;
Pdesc *pd;
n = 0;
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p==nil)
continue;
pd = &pdesc[i];
s = p->status;
if(s == Psyscall) {
// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
t = p->syscalltick;
if(pd->syscalltick != t) {
pd->syscalltick = t;
pd->syscallwhen = now;
continue;
}
// On the one hand we don't want to retake Ps if there is no other work to do,
// but on the other hand we want to retake them eventually
// because they can prevent the sysmon thread from deep sleep.
if(p->runqhead == p->runqtail &&
runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
pd->syscallwhen + 10*1000*1000 > now)
continue;
// Need to decrement number of idle locked M's
// (pretending that one more is running) before the CAS.
// Otherwise the M from which we retake can exit the syscall,
// increment nmidle and report deadlock.
incidlelocked(-1);
if(runtime·cas(&p->status, s, Pidle)) {
n++;
handoffp(p);
}
incidlelocked(1);
} else if(s == Prunning) {
// Preempt G if it's running for more than 10ms.
t = p->schedtick;
if(pd->schedtick != t) {
pd->schedtick = t;
pd->schedwhen = now;
continue;
}
if(pd->schedwhen + 10*1000*1000 > now)
continue;
preemptone(p);
}
}
return n;
}
// Tell all goroutines that they have been preempted and they should stop.
// This function is purely best-effort. It can fail to inform a goroutine if a
// processor just started running it.
// No locks need to be held.
// Returns true if preemption request was issued to at least one goroutine.
static bool
preemptall(void)
{
P *p;
int32 i;
bool res;
res = false;
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p == nil || p->status != Prunning)
continue;
res |= preemptone(p);
}
return res;
}
// Tell the goroutine running on processor P to stop.
// This function is purely best-effort. It can incorrectly fail to inform the
// goroutine. It can inform the wrong goroutine. Even if it informs the
// correct goroutine, that goroutine might ignore the request if it is
// simultaneously executing runtime·newstack.
// No lock needs to be held.
// Returns true if preemption request was issued.
// The actual preemption will happen at some point in the future
// and will be indicated by gp->status no longer being
// Grunning.
static bool
preemptone(P *p)
{
M *mp;
G *gp;
mp = p->m;
if(mp == nil || mp == g->m)
return false;
gp = mp->curg;
if(gp == nil || gp == mp->g0)
return false;
gp->preempt = true;
// Every call in a goroutine checks for stack overflow by
// comparing the current stack pointer to gp->stackguard0.
// Setting gp->stackguard0 to StackPreempt folds
// preemption into the normal stack overflow check.
gp->stackguard0 = StackPreempt;
return true;
}
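// Editor's note (an assumption about code outside this file, sketched for
// illustration): the request made above is noticed in the function prologue.
// Every splittable function compares its SP against g->stackguard0 and calls
// the stack-growth code when the check fails; StackPreempt is larger than any
// valid stack pointer, so setting stackguard0 = StackPreempt forces that path,
// and the stack-growth code reschedules instead of growing the stack. Roughly:
static void
examplecheckpreempt(G *gp)
{
	if(gp->preempt && gp->stackguard0 == StackPreempt) {
		// behave as if the goroutine had called runtime·gosched:
		// restore the real stack guard, clear the request, and let
		// the scheduler pick the next goroutine to run.
		gp->stackguard0 = gp->stackguard;
		gp->preempt = false;
		// the real stack-growth path would call into the scheduler here
	}
}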
void
runtime·schedtrace(bool detailed)
{
static int64 starttime;
int64 now;
int64 id1, id2, id3;
int32 i, t, h;
uintptr gi;
int8 *fmt;
M *mp, *lockedm;
G *gp, *lockedg;
P *p;
now = runtime·nanotime();
if(starttime == 0)
starttime = now;
runtime·lock(&runtime·sched.lock);
runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d spinningthreads=%d idlethreads=%d runqueue=%d",
(now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidle, runtime·sched.mcount,
runtime·sched.nmspinning, runtime·sched.nmidle, runtime·sched.runqsize);
if(detailed) {
runtime·printf(" gcwaiting=%d nmidlelocked=%d stopwait=%d sysmonwait=%d\n",
runtime·sched.gcwaiting, runtime·sched.nmidlelocked,
runtime·sched.stopwait, runtime·sched.sysmonwait);
}
// We must be careful while reading data from P's, M's and G's.
// Even if we hold schedlock, most data can be changed concurrently.
// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
for(i = 0; i < runtime·gomaxprocs; i++) {
p = runtime·allp[i];
if(p == nil)
continue;
mp = p->m;
h = runtime·atomicload(&p->runqhead);
t = runtime·atomicload(&p->runqtail);
if(detailed)
runtime·printf(" P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
else {
// In non-detailed mode format lengths of per-P run queues as:
// [len1 len2 len3 len4]
fmt = " %d";
if(runtime·gomaxprocs == 1)
fmt = " [%d]\n";
else if(i == 0)
fmt = " [%d";
else if(i == runtime·gomaxprocs-1)
fmt = " %d]\n";
runtime·printf(fmt, t-h);
}
}
if(!detailed) {
runtime·unlock(&runtime·sched.lock);
return;
}
for(mp = runtime·allm; mp; mp = mp->alllink) {
p = mp->p;
gp = mp->curg;
lockedg = mp->lockedg;
id1 = -1;
if(p)
id1 = p->id;
id2 = -1;
if(gp)
id2 = gp->goid;
id3 = -1;
if(lockedg)
id3 = lockedg->goid;
runtime·printf(" M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
" locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
mp->id, id1, id2,
mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
mp->spinning, g->m->blocked, id3);
}
runtime·lock(&allglock);
for(gi = 0; gi < runtime·allglen; gi++) {
gp = runtime·allg[gi];
mp = gp->m;
lockedm = gp->lockedm;
runtime·printf(" G%D: status=%d(%S) m=%d lockedm=%d\n",
gp->goid, runtime·readgstatus(gp), gp->waitreason, mp ? mp->id : -1,
lockedm ? lockedm->id : -1);
}
runtime·unlock(&allglock);
runtime·unlock(&runtime·sched.lock);
}
// Put mp on midle list.
// Sched must be locked.
static void
mput(M *mp)
{
mp->schedlink = runtime·sched.midle;
runtime·sched.midle = mp;
runtime·sched.nmidle++;
checkdead();
}
// Try to get an m from midle list.
// Sched must be locked.
static M*
mget(void)
{
M *mp;
if((mp = runtime·sched.midle) != nil){
runtime·sched.midle = mp->schedlink;
runtime·sched.nmidle--;
}
return mp;
}
// Put gp on the global runnable queue.
// Sched must be locked.
static void
globrunqput(G *gp)
{
gp->schedlink = nil;
if(runtime·sched.runqtail)
runtime·sched.runqtail->schedlink = gp;
else
runtime·sched.runqhead = gp;
runtime·sched.runqtail = gp;
runtime·sched.runqsize++;
}
// Put a batch of runnable goroutines on the global runnable queue.
// Sched must be locked.
static void
globrunqputbatch(G *ghead, G *gtail, int32 n)
{
gtail->schedlink = nil;
if(runtime·sched.runqtail)
runtime·sched.runqtail->schedlink = ghead;
else
runtime·sched.runqhead = ghead;
runtime·sched.runqtail = gtail;
runtime·sched.runqsize += n;
}
// Try to get a batch of G's from the global runnable queue.
// Sched must be locked.
static G*
globrunqget(P *p, int32 max)
{
G *gp, *gp1;
int32 n;
if(runtime·sched.runqsize == 0)
return nil;
n = runtime·sched.runqsize/runtime·gomaxprocs+1;
if(n > runtime·sched.runqsize)
n = runtime·sched.runqsize;
if(max > 0 && n > max)
n = max;
if(n > nelem(p->runq)/2)
n = nelem(p->runq)/2;
runtime·sched.runqsize -= n;
if(runtime·sched.runqsize == 0)
runtime·sched.runqtail = nil;
gp = runtime·sched.runqhead;
runtime·sched.runqhead = gp->schedlink;
n--;
while(n--) {
gp1 = runtime·sched.runqhead;
runtime·sched.runqhead = gp1->schedlink;
runqput(p, gp1);
}
return gp;
}
// Put p on the pidle list.
// Sched must be locked.
static void
pidleput(P *p)
{
p->link = runtime·sched.pidle;
runtime·sched.pidle = p;
runtime·xadd(&runtime·sched.npidle, 1); // TODO: fast atomic
}
// Try to get a p from the pidle list.
// Sched must be locked.
static P*
pidleget(void)
{
P *p;
p = runtime·sched.pidle;
if(p) {
runtime·sched.pidle = p->link;
runtime·xadd(&runtime·sched.npidle, -1); // TODO: fast atomic
}
return p;
}
// Try to put g on local runnable queue.
// If it's full, put onto global queue.
// Executed only by the owner P.
static void
runqput(P *p, G *gp)
{
uint32 h, t;
retry:
h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
t = p->runqtail;
if(t - h < nelem(p->runq)) {
p->runq[t%nelem(p->runq)] = gp;
runtime·atomicstore(&p->runqtail, t+1); // store-release, makes the item available for consumption
return;
}
if(runqputslow(p, gp, h, t))
return;
// the queue is not full, now the put above must succeed
goto retry;
}
// Put g and a batch of work from local runnable queue on global queue.
// Executed only by the owner P.
static bool
runqputslow(P *p, G *gp, uint32 h, uint32 t)
{
G *batch[nelem(p->runq)/2+1];
uint32 n, i;
// First, grab a batch from local queue.
n = t-h;
n = n/2;
if(n != nelem(p->runq)/2)
runtime·throw("runqputslow: queue is not full");
for(i=0; i<n; i++)
batch[i] = p->runq[(h+i)%nelem(p->runq)];
if(!runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
return false;
batch[n] = gp;
// Link the goroutines.
for(i=0; i<n; i++)
batch[i]->schedlink = batch[i+1];
// Now put the batch on global queue.
runtime·lock(&runtime·sched.lock);
globrunqputbatch(batch[0], batch[n], n+1);
runtime·unlock(&runtime·sched.lock);
return true;
}
// Get g from local runnable queue.
// Executed only by the owner P.
static G*
runqget(P *p)
{
G *gp;
uint32 t, h;
for(;;) {
h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
t = p->runqtail;
if(t == h)
return nil;
gp = p->runq[h%nelem(p->runq)];
if(runtime·cas(&p->runqhead, h, h+1)) // cas-release, commits consume
return gp;
}
}
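// Editor's sketch (hypothetical helper, not in the original source): because
// the producer publishes new items with a store-release on runqtail and
// consumers advance runqhead with CAS, any thread can take an approximate
// snapshot of the queue length the same way runqgrab and schedtrace do. The
// snapshot may be momentarily inconsistent (see the recheck in runqgrab),
// which is acceptable for heuristics such as work stealing.
static uint32
runqlen(P *p)
{
	uint32 h, t;

	h = runtime·atomicload(&p->runqhead);	// load-acquire
	t = runtime·atomicload(&p->runqtail);	// load-acquire
	return t - h;	// approximate: either value may be stale already
}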
// Grabs a batch of goroutines from local runnable queue.
// batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
// Can be executed by any P.
static uint32
runqgrab(P *p, G **batch)
{
uint32 t, h, n, i;
for(;;) {
h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with other consumers
t = runtime·atomicload(&p->runqtail); // load-acquire, synchronize with the producer
n = t-h;
n = n - n/2;
if(n == 0)
break;
if(n > nelem(p->runq)/2) // read inconsistent h and t
continue;
for(i=0; i<n; i++)
batch[i] = p->runq[(h+i)%nelem(p->runq)];
if(runtime·cas(&p->runqhead, h, h+n)) // cas-release, commits consume
break;
}
return n;
}
// Steal half of elements from local runnable queue of p2
// and put onto local runnable queue of p.
// Returns one of the stolen elements (or nil if failed).
static G*
runqsteal(P *p, P *p2)
{
G *gp;
G *batch[nelem(p->runq)/2];
uint32 t, h, n, i;
n = runqgrab(p2, batch);
if(n == 0)
return nil;
n--;
gp = batch[n];
if(n == 0)
return gp;
h = runtime·atomicload(&p->runqhead); // load-acquire, synchronize with consumers
t = p->runqtail;
if(t - h + n >= nelem(p->runq))
runtime·throw("runqsteal: runq overflow");
for(i=0; i<n; i++, t++)
p->runq[t%nelem(p->runq)] = batch[i];
runtime·atomicstore(&p->runqtail, t); // store-release, makes the item available for consumption
return gp;
}
void
runtime·testSchedLocalQueue(void)
{
P p;
G gs[nelem(p.runq)];
int32 i, j;
runtime·memclr((byte*)&p, sizeof(p));
for(i = 0; i < nelem(gs); i++) {
if(runqget(&p) != nil)
runtime·throw("runq is not empty initially");
for(j = 0; j < i; j++)
runqput(&p, &gs[i]);
for(j = 0; j < i; j++) {
if(runqget(&p) != &gs[i]) {
runtime·printf("bad element at iter %d/%d\n", i, j);
runtime·throw("bad element");
}
}
if(runqget(&p) != nil)
runtime·throw("runq is not empty afterwards");
}
}
void
runtime·testSchedLocalQueueSteal(void)
{
P p1, p2;
G gs[nelem(p1.runq)], *gp;
int32 i, j, s;
runtime·memclr((byte*)&p1, sizeof(p1));
runtime·memclr((byte*)&p2, sizeof(p2));
for(i = 0; i < nelem(gs); i++) {
for(j = 0; j < i; j++) {
gs[j].sig = 0;
runqput(&p1, &gs[j]);
}
gp = runqsteal(&p2, &p1);
s = 0;
if(gp) {
s++;
gp->sig++;
}
while(gp = runqget(&p2)) {
s++;
gp->sig++;
}
while(gp = runqget(&p1))
gp->sig++;
for(j = 0; j < i; j++) {
if(gs[j].sig != 1) {
runtime·printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
runtime·throw("bad element");
}
}
if(s != i/2 && s != i/2+1) {
runtime·printf("bad steal %d, want %d or %d, iter %d\n",
s, i/2, i/2+1, i);
runtime·throw("bad steal");
}
}
}
void
runtime·setmaxthreads_m(void)
{
int32 in;
int32 out;
in = g->m->scalararg[0];
runtime·lock(&runtime·sched.lock);
out = runtime·sched.maxmcount;
runtime·sched.maxmcount = in;
checkmcount();
runtime·unlock(&runtime·sched.lock);
g->m->scalararg[0] = out;
}
static int8 experiment[] = GOEXPERIMENT; // defined in zaexperiment.h
static bool
haveexperiment(int8 *name)
{
int32 i, j;
for(i=0; i<sizeof(experiment); i++) {
if((i == 0 || experiment[i-1] == ',') && experiment[i] == name[0]) {
for(j=0; name[j]; j++)
if(experiment[i+j] != name[j])
goto nomatch;
if(experiment[i+j] != '\0' && experiment[i+j] != ',')
goto nomatch;
return 1;
}
nomatch:;
}
return 0;
}
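// Editor's note: a hypothetical usage sketch (the experiment name below is
// invented for illustration). GOEXPERIMENT is baked into zaexperiment.h at
// build time as a comma-separated list, and haveexperiment matches a name
// against complete entries in that list.
static void
exampleexperiment(void)
{
	if(haveexperiment("someexperiment"))	// hypothetical name
		runtime·printf("runtime: someexperiment enabled\n");
}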
#pragma textflag NOSPLIT
void
sync·runtime_procPin(intptr p)
{
M *mp;
mp = g->m;
// Disable preemption.
mp->locks++;
p = mp->p->id;
FLUSH(&p);
}
#pragma textflag NOSPLIT
void
sync·runtime_procUnpin()
{
g->m->locks--;
}