Mirror of https://github.com/golang/go, synced 2024-11-19 15:44:44 -07:00
runtime: clean up GC code
Remove C version of GC. Convert freeOSMemory to Go.
Restore g0 check in GC.
Remove unknownGCPercent check in GC, it's initialized explicitly now.

LGTM=rsc
R=golang-codereviews, rsc
CC=golang-codereviews, khr
https://golang.org/cl/139910043
This commit is contained in:
parent 6f19fd438e
commit 9f38b6c9e5
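For orientation (not part of the commit itself): the user-visible entry point behind the freeOSMemory conversion is runtime/debug.FreeOSMemory. The sketch below is a minimal illustration that only exercises that public API and the standard runtime.MemStats counters; it does not touch the runtime internals changed in the diff.

// Minimal sketch, assuming only the public API: debug.FreeOSMemory forces a
// GC with an eager sweep and then asks the heap to return unused memory to
// the OS -- the gogc(2) + scavenge sequence shown in the diff below.
package main

import (
	"fmt"
	"runtime"
	"runtime/debug"
)

var sink [][]byte // keeps the allocations reachable until we drop them

func main() {
	// Allocate roughly 64 MiB, then drop it so the collection has garbage to reclaim.
	for i := 0; i < 64; i++ {
		sink = append(sink, make([]byte, 1<<20))
	}
	sink = nil

	var before, after runtime.MemStats
	runtime.ReadMemStats(&before)

	debug.FreeOSMemory()

	runtime.ReadMemStats(&after)
	fmt.Printf("HeapReleased: %d -> %d bytes\n", before.HeapReleased, after.HeapReleased)
}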
@@ -407,33 +407,19 @@ func profilealloc(mp *m, x unsafe.Pointer, size uintptr) {
 // force = 1 - do GC regardless of current heap usage
 // force = 2 - go GC and eager sweep
 func gogc(force int32) {
-	if !memstats.enablegc {
-		return
-	}
-
-	// TODO: should never happen? Only C calls malloc while holding a lock?
+	// The gc is turned off (via enablegc) until the bootstrap has completed.
+	// Also, malloc gets called in the guts of a number of libraries that might be
+	// holding locks. To avoid deadlocks during stoptheworld, don't bother
+	// trying to run gc while holding a lock. The next mallocgc without a lock
+	// will do the gc instead.
 	mp := acquirem()
-	if mp.locks > 1 {
+	if gp := getg(); gp == mp.g0 || mp.locks > 1 || !memstats.enablegc || panicking != 0 || gcpercent < 0 {
 		releasem(mp)
 		return
 	}
 	releasem(mp)
 	mp = nil
 
-	if panicking != 0 {
-		return
-	}
-	if gcpercent == gcpercentUnknown {
-		lock(&mheap_.lock)
-		if gcpercent == gcpercentUnknown {
-			gcpercent = readgogc()
-		}
-		unlock(&mheap_.lock)
-	}
-	if gcpercent < 0 {
-		return
-	}
-
 	semacquire(&worldsema, false)
 
 	if force == 0 && memstats.heap_alloc < memstats.next_gc {
@@ -519,7 +519,6 @@ void runtime·MHeap_Scavenge(int32 k, uint64 now, uint64 limit);
 
 void*	runtime·persistentalloc(uintptr size, uintptr align, uint64 *stat);
 int32	runtime·mlookup(void *v, byte **base, uintptr *size, MSpan **s);
-void	runtime·gc(int32 force);
 uintptr	runtime·sweepone(void);
 void	runtime·markspan(void *v, uintptr size, uintptr n, bool leftover);
 void	runtime·unmarkspan(void *v, uintptr size);
@@ -1284,82 +1284,6 @@ runtime·gcinit(void)
 	runtime·gcbssmask = unrollglobgcprog(runtime·gcbss, runtime·ebss - runtime·bss);
 }
 
-// force = 1 - do GC regardless of current heap usage
-// force = 2 - go GC and eager sweep
-void
-runtime·gc(int32 force)
-{
-	struct gc_args a;
-	int32 i;
-
-	// The gc is turned off (via enablegc) until
-	// the bootstrap has completed.
-	// Also, malloc gets called in the guts
-	// of a number of libraries that might be
-	// holding locks. To avoid priority inversion
-	// problems, don't bother trying to run gc
-	// while holding a lock. The next mallocgc
-	// without a lock will do the gc instead.
-	if(!mstats.enablegc || g == g->m->g0 || g->m->locks > 0 || runtime·panicking)
-		return;
-
-	if(runtime·gcpercent < 0)
-		return;
-
-	runtime·semacquire(&runtime·worldsema, false);
-	if(force==0 && mstats.heap_alloc < mstats.next_gc) {
-		// typically threads which lost the race to grab
-		// worldsema exit here when gc is done.
-		runtime·semrelease(&runtime·worldsema);
-		return;
-	}
-
-	// Ok, we're doing it! Stop everybody else
-	a.start_time = runtime·nanotime();
-	a.eagersweep = force >= 2;
-	g->m->gcing = 1;
-	runtime·stoptheworld();
-
-	runtime·clearpools();
-
-	// Run gc on the g0 stack. We do this so that the g stack
-	// we're currently running on will no longer change. Cuts
-	// the root set down a bit (g0 stacks are not scanned, and
-	// we don't need to scan gc's internal state). Also an
-	// enabler for copyable stacks.
-	for(i = 0; i < (runtime·debug.gctrace > 1 ? 2 : 1); i++) {
-		if(i > 0)
-			a.start_time = runtime·nanotime();
-		// switch to g0, call gc(&a), then switch back
-		g->param = &a;
-		runtime·casgstatus(g, Grunning, Gwaiting);
-		g->waitreason = runtime·gostringnocopy((byte*)"garbage collection");
-		runtime·mcall(mgc);
-	}
-
-	// all done
-	g->m->gcing = 0;
-	g->m->locks++;
-	runtime·semrelease(&runtime·worldsema);
-	runtime·starttheworld();
-	g->m->locks--;
-
-	// now that gc is done, kick off finalizer thread if needed
-	if(!ConcurrentSweep) {
-		// give the queued finalizers, if any, a chance to run
-		runtime·gosched();
-	}
-}
-
-static void
-mgc(G *gp)
-{
-	gc(gp->param);
-	gp->param = nil;
-	runtime·casgstatus(gp, Gwaiting, Grunning);
-	runtime·gogo(&gp->sched);
-}
-
 void
 runtime·gc_m(void)
 {
@@ -1502,7 +1426,7 @@ gc(struct gc_args *args)
 	if(ConcurrentSweep && !args->eagersweep) {
 		runtime·lock(&gclock);
 		if(sweep.g == nil)
-			sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, runtime·gc);
+			sweep.g = runtime·newproc1(&bgsweepv, nil, 0, 0, gc);
 		else if(sweep.parked) {
 			sweep.parked = false;
 			runtime·ready(sweep.g);
@@ -34,3 +34,8 @@ func gc_unixnanotime(now *int64) {
 	sec, nsec := timenow()
 	*now = sec*1e9 + int64(nsec)
 }
+
+func freeOSMemory() {
+	gogc(2) // force GC and do eager sweep
+	onM(&scavenge_m)
+}
@@ -622,19 +622,10 @@ runtime·MHeap_Scavenge(int32 k, uint64 now, uint64 limit)
 	}
 }
 
-static void
-scavenge_m(G *gp)
+void
+runtime·scavenge_m(void)
 {
 	runtime·MHeap_Scavenge(-1, ~(uintptr)0, 0);
-	runtime·gogo(&gp->sched);
 }
 
-void
-runtime∕debug·freeOSMemory(void)
-{
-	runtime·gc(2); // force GC and do eager sweep
-
-	runtime·mcall(scavenge_m);
-}
-
 // Initialize a new span with the given start and npages.
@@ -78,6 +78,7 @@ var (
 	largeAlloc_m,
 	mprofMalloc_m,
 	gc_m,
+	scavenge_m,
 	setFinalizer_m,
 	removeFinalizer_m,
 	markallocated_m,
@@ -111,8 +112,7 @@ func memmove(to unsafe.Pointer, from unsafe.Pointer, n uintptr)
 func fastrand2() uint32
 
 const (
-	gcpercentUnknown = -2
-	concurrentSweep  = true
+	concurrentSweep = true
 )
 
 func gosched()
@@ -61,3 +61,6 @@ TEXT reflect·chanlen(SB), NOSPLIT, $0-0
 
 TEXT reflect·chancap(SB), NOSPLIT, $0-0
 	JMP	runtime·reflect_chancap(SB)
+
+TEXT runtime∕debug·freeOSMemory(SB), NOSPLIT, $0-0
+	JMP	runtime·freeOSMemory(SB)