
runtime: track running goroutine count

Used to use mcpu+msyscall but that's
problematic for packing into a single
atomic word.  The running goroutine count
(where running == Go code or syscall)
can be maintained separately, always
manipulated under lock.

R=golang-dev, bradfitz
CC=golang-dev
https://golang.org/cl/4767041
Author: Russ Cox
Date:   2011-07-18 15:50:55 -04:00
commit: bd77619142
parent: 27753ff108
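For orientation before the diff: a minimal standalone sketch of the bookkeeping this change introduces, written in portable C with pthreads rather than the runtime's own Plan 9 C. It shows a grunning counter that is touched only under the scheduler lock, counting goroutines that are either executing Go code or sitting in a system call, plus the deadlock test that replaces the old mcpu == 0 && msyscall == 0 check. All names here (Sched, sched, start_g, stop_g, enter_syscall, exit_syscall, check_deadlock) are illustrative, not the runtime's.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the runtime's Sched structure. */
struct Sched {
	pthread_mutex_t lock;
	int mcpu;	/* number of ms executing Go code on a cpu */
	int grunning;	/* number of gs running on cpu or in syscall */
};

static struct Sched sched = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

/* A goroutine is handed to an m and starts running Go code. */
static void
start_g(void)
{
	pthread_mutex_lock(&sched.lock);
	sched.grunning++;
	sched.mcpu++;
	pthread_mutex_unlock(&sched.lock);
}

/* The goroutine blocks or exits; it no longer counts as running. */
static void
stop_g(void)
{
	pthread_mutex_lock(&sched.lock);
	sched.mcpu--;
	sched.grunning--;
	pthread_mutex_unlock(&sched.lock);
}

/* Entering a system call frees the cpu slot, but the goroutine
 * still counts as running, so grunning is left untouched. */
static void
enter_syscall(void)
{
	pthread_mutex_lock(&sched.lock);
	sched.mcpu--;
	pthread_mutex_unlock(&sched.lock);
}

static void
exit_syscall(void)
{
	pthread_mutex_lock(&sched.lock);
	sched.mcpu++;
	pthread_mutex_unlock(&sched.lock);
}

/* The new deadlock test: if no goroutine is on a cpu or in a
 * syscall, nothing can ever wake the sleeping ones up. */
static void
check_deadlock(void)
{
	pthread_mutex_lock(&sched.lock);
	if(sched.grunning == 0) {
		fprintf(stderr, "all goroutines are asleep - deadlock!\n");
		exit(1);
	}
	pthread_mutex_unlock(&sched.lock);
}

int
main(void)
{
	start_g();		/* one goroutine running */
	enter_syscall();	/* mcpu drops to 0, grunning stays 1 */
	check_deadlock();	/* ok: the syscall goroutine still counts */
	exit_syscall();
	stop_g();		/* now nothing is running or in a syscall */
	check_deadlock();	/* prints the deadlock message and exits */
	return 0;
}

Built with cc -pthread, the sketch passes the first check and dies on the second, mirroring the runtime·throw in nextgandunlock below.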


@@ -62,13 +62,13 @@ struct Sched {
 	G *gtail;
 	int32 gwait;	// number of gs waiting to run
 	int32 gcount;	// number of gs that are alive
+	int32 grunning;	// number of gs running on cpu or in syscall
 
 	M *mhead;	// ms waiting for work
 	int32 mwait;	// number of ms waiting for work
 	int32 mcount;	// number of ms that have been created
 	int32 mcpu;	// number of ms executing on cpu
 	int32 mcpumax;	// max number of ms allowed on cpu
-	int32 msyscall;	// number of ms in system calls
 
 	int32 predawn;	// running initialization, don't run new gs.
 	int32 profilehz;	// cpu profiling rate
@ -353,6 +353,7 @@ newprocreadylocked(G *g)
static void static void
mnextg(M *m, G *g) mnextg(M *m, G *g)
{ {
runtime·sched.grunning++;
runtime·sched.mcpu++; runtime·sched.mcpu++;
m->nextg = g; m->nextg = g;
if(m->waitnextg) { if(m->waitnextg) {
@@ -397,6 +398,7 @@ nextgandunlock(void)
 				mnextg(gp->lockedm, gp);
 				continue;
 			}
+			runtime·sched.grunning++;
 			runtime·sched.mcpu++;	// this m will run gp
 			schedunlock();
 			return gp;
@@ -404,7 +406,7 @@ nextgandunlock(void)
 		// Otherwise, wait on global m queue.
 		mput(m);
 	}
-	if(runtime·sched.mcpu == 0 && runtime·sched.msyscall == 0)
+	if(runtime·sched.grunning == 0)
 		runtime·throw("all goroutines are asleep - deadlock!");
 	m->nextg = nil;
 	m->waitnextg = 1;
@@ -548,6 +550,7 @@ schedule(G *gp)
 		// Just finished running gp.
 		gp->m = nil;
 		runtime·sched.mcpu--;
+		runtime·sched.grunning--;
 
 		if(runtime·sched.mcpu < 0)
 			runtime·throw("runtime·sched.mcpu < 0 in scheduler");
@@ -634,7 +637,6 @@ runtime·entersyscall(void)
 	schedlock();
 	g->status = Gsyscall;
 	runtime·sched.mcpu--;
-	runtime·sched.msyscall++;
 	if(runtime·sched.gwait != 0)
 		matchmg();
 
@@ -668,7 +670,6 @@ runtime·exitsyscall(void)
 		return;
 	schedlock();
-	runtime·sched.msyscall--;
 	runtime·sched.mcpu++;
 
 	// Fast path - if there's room for this m, we're done.
 	if(m->profilehz == runtime·sched.profilehz && runtime·sched.mcpu <= runtime·sched.mcpumax) {
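The last two hunks carry the point of the change: entersyscall and exitsyscall now adjust only mcpu and leave grunning untouched, because a goroutine blocked in a system call still counts as running. That is what lets the deadlock check in nextgandunlock collapse from mcpu == 0 && msyscall == 0 to grunning == 0, and what removes msyscall from the set of counters that would otherwise have to be packed into a single atomic word.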