
runtime: don't lock mheap on user goroutine

This is bad for two reasons:
1. If the code under the lock ever grows the stack,
it deadlocks, because stack growth itself acquires the mheap lock.
2. It currently deadlocks with SetCPUProfileRate:
the scavenger locks mheap, receives a profiling signal and tries to take the prof lock;
meanwhile SetCPUProfileRate holds the prof lock and tries to grow the stack
(presumably in runtime.unlock->futexwakeup). Boom (see the sketch below).
Fixes #8407.

LGTM=rsc
R=golang-codereviews, rsc
CC=golang-codereviews, khr
https://golang.org/cl/112640043
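The failure in reason 2 is a classic lock-order inversion: one side holds the mheap lock and wants the prof lock, the other holds the prof lock and, via stack growth, wants the mheap lock. Below is a minimal Go sketch of the same shape; the mutex names and the short sleeps are illustrative only, not the runtime's actual locks. Running it blocks every goroutine and trips Go's built-in deadlock detector.

package main

import (
	"sync"
	"time"
)

// Two locks standing in for the mheap lock and the prof lock.
// The names are illustrative only.
var heapLock, profLock sync.Mutex

func main() {
	var wg sync.WaitGroup
	wg.Add(2)

	// "Scavenger": takes the heap lock, then needs the prof lock
	// (as if a profiling signal arrived while mheap was held).
	go func() {
		defer wg.Done()
		heapLock.Lock()
		time.Sleep(10 * time.Millisecond)
		profLock.Lock() // blocks forever
		profLock.Unlock()
		heapLock.Unlock()
	}()

	// "SetCPUProfileRate": takes the prof lock, then needs the heap
	// lock (as if growing the stack while prof was held).
	go func() {
		defer wg.Done()
		profLock.Lock()
		time.Sleep(10 * time.Millisecond)
		heapLock.Lock() // blocks forever
		heapLock.Unlock()
		profLock.Unlock()
	}()

	wg.Wait() // runtime reports: all goroutines are asleep - deadlock!
}

The fix below sidesteps the inversion entirely by never taking the mheap lock on the scavenger's user goroutine.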
Dmitriy Vyukov 2014-07-23 18:52:25 +04:00
parent 0603fbb01c
commit e91704af27


@@ -642,7 +642,6 @@ static FuncVal forcegchelperv = {(void(*)(void))forcegchelper};
 void
 runtime·MHeap_Scavenger(void)
 {
-	MHeap *h;
 	uint64 tick, forcegc, limit;
 	int64 unixnow;
 	int32 k;
@@ -662,15 +661,12 @@ runtime·MHeap_Scavenger(void)
 	else
 		tick = limit/2;
 
-	h = &runtime·mheap;
 	for(k=0;; k++) {
 		runtime·noteclear(&note);
 		runtime·notetsleepg(&note, tick);
-		runtime·lock(h);
 		unixnow = runtime·unixnanotime();
 		if(unixnow - mstats.last_gc > forcegc) {
-			runtime·unlock(h);
 			// The scavenger can not block other goroutines,
 			// otherwise deadlock detector can fire spuriously.
 			// GC blocks other goroutines via the runtime·worldsema.
@@ -680,13 +676,13 @@ runtime·MHeap_Scavenger(void)
 			runtime·notetsleepg(&note, -1);
 			if(runtime·debug.gctrace > 0)
 				runtime·printf("scvg%d: GC forced\n", k);
-			runtime·lock(h);
 		}
-		runtime·unlock(h);
+		g->m->locks++; // ensure that we are on the same m while filling arguments
 		g->m->scalararg[0] = k;
 		g->m->scalararg[1] = runtime·nanotime();
 		g->m->scalararg[2] = limit;
 		runtime·mcall(scavenge_m);
+		g->m->locks--;
 	}
 }
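After the change the loop body takes no mheap lock at all: the lock-sensitive work lives in scavenge_m, which runtime·mcall runs on the m's g0 (scheduler) stack, where stack growth cannot occur. The g->m->locks++ / g->m->locks-- pair keeps the goroutine from being moved to another m between filling the scalararg slots and the mcall that consumes them. A loose user-level analogy (not the runtime's internal mechanism, and the helper name is hypothetical) is pinning a goroutine to its OS thread while it relies on thread-affine state:

package main

import (
	"fmt"
	"runtime"
)

// doPinned runs fn with the calling goroutine pinned to its current
// OS thread, loosely analogous to the g->m->locks++ / locks-- pair
// in the patch: state prepared before the call is consumed on the
// same thread. Purely illustrative.
func doPinned(fn func()) {
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()
	fn()
}

func main() {
	doPinned(func() {
		fmt.Println("running pinned to one OS thread")
	})
}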