runtime: more reliable preemption
Currently the preemption signal g->stackguard0 == StackPreempt can be
lost if it is received while preemption is disabled (e.g. m->locks != 0).
This change duplicates the preemption signal in g->preempt and restores
g->stackguard0 when preemption is re-enabled.

Update #543.

R=golang-dev, rsc
CC=golang-dev
https://golang.org/cl/10792043
commit 5887f142a3
parent a83748596c
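The change is a two-level signal: g->stackguard0 is the fast, resettable
signal (every function prologue already compares the stack pointer against
it, so poisoning it with StackPreempt forces a call into the runtime), and
g->preempt is a sticky duplicate that survives while preemption is disabled
and re-arms the fast signal once it is enabled again. The standalone C
sketch below models that pattern only; the types, the helper names
(preempt_request, defer_preemption) and the single-threaded setup are
illustrative assumptions, not the runtime's actual code:

/*
 * Minimal model (not the runtime's real code) of the pattern this
 * commit introduces: a resettable fast signal (stackguard0) plus a
 * sticky duplicate (preempt) that re-arms it.
 */
#include <stdbool.h>
#include <stdio.h>

#define STACK_PREEMPT (~(unsigned long)0)   /* poisoned guard value */

struct G {
	unsigned long stackguard;   /* the real stack limit */
	unsigned long stackguard0;  /* checked in every prologue; may be STACK_PREEMPT */
	bool preempt;               /* sticky duplicate of the preemption request */
};

struct M {
	int locks;                  /* > 0 means preemption is disabled */
};

/* Request preemption of g (what preemptone does in the diff). */
static void preempt_request(struct G *g)
{
	g->preempt = true;               /* sticky: cannot be lost */
	g->stackguard0 = STACK_PREEMPT;  /* fast: may be cleared below */
}

/* newstack's "not now" path: clear the fast signal so g keeps
 * running, relying on g->preempt to remember the request. */
static void defer_preemption(struct G *g)
{
	g->stackguard0 = g->stackguard;
}

/* The re-arm path: when the last lock is released, restore the
 * fast signal from the sticky bit (the line the diff adds). */
static void unlock(struct M *m, struct G *g)
{
	if(--m->locks == 0 && g->preempt)
		g->stackguard0 = STACK_PREEMPT;
}

int main(void)
{
	struct G g = { 0x1000, 0x1000, false };
	struct M m = { 1 };

	preempt_request(&g);   /* signal arrives while m.locks != 0 */
	defer_preemption(&g);  /* newstack clears the fast signal... */
	unlock(&m, &g);        /* ...and unlock restores it from g.preempt */

	printf("re-armed: %d\n", g.stackguard0 == STACK_PREEMPT);  /* prints: re-armed: 1 */
	return 0;
}

The design point is that stackguard0 cannot carry the request alone: it
doubles as the stack-overflow check, so newstack must reset it to the real
guard whenever it declines to preempt, and without the sticky bit the
request would be silently dropped, which is exactly the bug the commit
message describes. Only execute() clears g->preempt, once the goroutine is
actually rescheduled.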
--- a/src/pkg/runtime/lock_futex.c
+++ b/src/pkg/runtime/lock_futex.c
@@ -5,6 +5,7 @@
 // +build freebsd linux
 
 #include "runtime.h"
+#include "stack.h"
 
 // This implementation depends on OS-specific implementations of
 //
@@ -99,6 +100,8 @@ runtime·unlock(Lock *l)
 
 	if(--m->locks < 0)
 		runtime·throw("runtime·unlock: lock count");
+	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
+		g->stackguard0 = StackPreempt;
 }
 
 // One-time notifications.
--- a/src/pkg/runtime/lock_sema.c
+++ b/src/pkg/runtime/lock_sema.c
@@ -5,6 +5,7 @@
 // +build darwin netbsd openbsd plan9 windows
 
 #include "runtime.h"
+#include "stack.h"
 
 // This implementation depends on OS-specific implementations of
 //
@@ -112,6 +113,8 @@ runtime·unlock(Lock *l)
 
 	if(--m->locks < 0)
 		runtime·throw("runtime·unlock: lock count");
+	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
+		g->stackguard0 = StackPreempt;
 }
 
 // One-time notifications.
--- a/src/pkg/runtime/malloc.goc
+++ b/src/pkg/runtime/malloc.goc
@@ -13,6 +13,7 @@ package runtime
 #include "type.h"
 #include "typekind.h"
 #include "race.h"
+#include "stack.h"
 
 MHeap runtime·mheap;
 
@@ -94,6 +95,8 @@ runtime·mallocgc(uintptr size, uint32 flag, int32 dogc, int32 zeroed)
 		*(uintptr*)((uintptr)v+size-sizeof(uintptr)) = 0;
 
 	m->mallocing = 0;
+	if(g->preempt)  // restore the preemption request in case we've cleared it in newstack
+		g->stackguard0 = StackPreempt;
 
 	if(!(flag & FlagNoProfiling) && (rate = runtime·MemProfileRate) > 0) {
 		if(size >= rate)
--- a/src/pkg/runtime/mgc0.c
+++ b/src/pkg/runtime/mgc0.c
@@ -2014,6 +2014,7 @@ runtime·gc(int32 force)
 	}
 
 	// all done
+	m->gcing = 0;
 	runtime·semrelease(&runtime·worldsema);
 	runtime·starttheworld();
 
@@ -2031,6 +2032,8 @@ runtime·gc(int32 force)
 		// give the queued finalizers, if any, a chance to run
 		runtime·gosched();
 	}
+	if(g->preempt)  // restore the preemption request in case we've cleared it in newstack
+		g->stackguard0 = StackPreempt;
 }
 
 static void
@@ -2115,7 +2118,6 @@ gc(struct gc_args *args)
 
 	cachestats();
 	mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
-	m->gcing = 0;
 
 	t4 = runtime·nanotime();
 	mstats.last_gc = t4;
--- a/src/pkg/runtime/proc.c
+++ b/src/pkg/runtime/proc.c
@@ -294,6 +294,8 @@ runtime·ready(G *gp)
 	if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0)  // TODO: fast atomic
 		wakep();
 	m->locks--;
+	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
+		g->stackguard0 = StackPreempt;
 }
 
 int32
@@ -475,6 +477,8 @@ runtime·starttheworld(void)
 		newm(mhelpgc, nil);
 	}
 	m->locks--;
+	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
+		g->stackguard0 = StackPreempt;
 }
 
 // Called to start an M.
@@ -564,6 +568,8 @@ runtime·allocm(P *p)
 	if(p == m->p)
 		releasep();
 	m->locks--;
+	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
+		g->stackguard0 = StackPreempt;
 
 	return mp;
 }
@@ -1008,6 +1014,7 @@ execute(G *gp)
 		runtime·throw("execute: bad g status");
 	}
 	gp->status = Grunning;
+	gp->preempt = false;
 	gp->stackguard0 = gp->stackguard;
 	m->p->tick++;
 	m->curg = gp;
@@ -1433,6 +1440,8 @@ runtime·exitsyscall(void)
 		// so okay to clear gcstack and gcsp.
 		g->gcstack = (uintptr)nil;
 		g->gcsp = (uintptr)nil;
+		if(g->preempt)  // restore the preemption request in case we've cleared it in newstack
+			g->stackguard0 = StackPreempt;
 		return;
 	}
 
@@ -1450,6 +1459,8 @@ runtime·exitsyscall(void)
 		g->status = Grunning;
 		g->gcstack = (uintptr)nil;
 		g->gcsp = (uintptr)nil;
+		if(g->preempt)  // restore the preemption request in case we've cleared it in newstack
+			g->stackguard0 = StackPreempt;
 		return;
 	}
 }
@@ -1620,6 +1631,8 @@ runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
 	if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main)  // TODO: fast atomic
 		wakep();
 	m->locks--;
+	if(m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
+		g->stackguard0 = StackPreempt;
 	return newg;
 }
 
@@ -2174,6 +2187,7 @@ if(1) return;
 	gp = mp->curg;
 	if(gp == nil || gp == mp->g0)
 		return;
+	gp->preempt = true;
 	gp->stackguard0 = StackPreempt;
 }
 
--- a/src/pkg/runtime/runtime.h
+++ b/src/pkg/runtime/runtime.h
@@ -253,6 +253,7 @@ struct G
 	bool	issystem;	// do not output in stack dump
 	bool	isbackground;	// ignore in deadlock detector
 	bool	blockingsyscall;	// hint that the next syscall will block
+	bool	preempt;	// preemption signal, duplicates stackguard0 = StackPreempt
 	int8	raceignore;	// ignore race detection events
 	M*	m;	// for debuggers, but offset not hard-coded
 	M*	lockedm;
--- a/src/pkg/runtime/stack.c
+++ b/src/pkg/runtime/stack.c
@@ -250,7 +250,7 @@ runtime·newstack(void)
 		// We are interested in preempting user Go code, not runtime code.
 		if(oldstatus != Grunning || m->locks || m->mallocing || m->gcing) {
 			// Let the goroutine keep running for now.
-			// TODO(dvyukov): remember but delay the preemption.
+			// gp->preempt is set, so it will be preempted next time.
 			gp->stackguard0 = gp->stackguard;
 			gp->status = oldstatus;
 			runtime·gogo(&gp->sched);	// never return