// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector -- step 0.
//
// Stop the world, mark and sweep garbage collector.
// NOT INTENDED FOR PRODUCTION USE.
//
// A mark and sweep collector provides a way to exercise
// and test the memory allocator and the stack walking machinery
// without also needing to get reference counting
// exactly right.
|
|
|
|
#include "runtime.h"
|
|
|
|
#include "malloc.h"
|
|
|
|
|
|
|
|
// Debug controls diagnostic printing in this file; values > 1
// trace every scan (see scanblock, scanstack).
enum {
	Debug = 0
};
// Segment boundary symbols — presumably provided by the linker
// (start of data, end of text, end of bss); data..end brackets the
// static data scanned as GC roots in mark().  NOTE(review): etext is
// declared but not referenced in this file.
extern byte data[];
extern byte etext[];
extern byte end[];

// A queued finalizer call: run fn(p).  nret is the size in bytes of
// the finalizer's return values, passed through to newproc1.
typedef struct Finq Finq;
struct Finq
{
	void (*fn)(void*);
	void *p;
	int32 nret;
};

// Fixed-capacity finalizer queue, filled by sweepspan1 and drained
// by gc(); blocks that do not fit keep their finalizer flag and are
// retried on a later collection.
static Finq finq[128];	// finalizer queue - two elements per entry
static Finq *pfinq = finq;	// next free slot
static Finq *efinq = finq+nelem(finq);	// one past the last slot

// NOTE(review): declared but never defined or called in this file —
// looks like a stale prototype; confirm before removing.
static void sweepblock(byte*, int64, uint32*, int32);
// Size of a machine pointer; scanblock only inspects words aligned
// to this boundary.
enum {
	PtrSize = sizeof(void*)
};
// scanblock conservatively scans the n bytes at b for values that
// look like heap pointers, marks the blocks they refer to, and
// recurses into each newly marked block.  depth is used only for
// debug output.
static void
scanblock(int32 depth, byte *b, int64 n)
{
	int32 off;
	void *obj;
	uintptr size;
	uint32 *refp, ref;
	void **vp;
	int64 i;

	if(Debug > 1)
		printf("%d scanblock %p %D\n", depth, b, n);
	// Round b up to a pointer boundary; only aligned words are
	// treated as candidate pointers.
	off = (uint32)(uintptr)b & (PtrSize-1);
	if(off) {
		b += PtrSize - off;
		n -= PtrSize - off;
	}

	vp = (void**)b;
	n /= PtrSize;
	for(i=0; i<n; i++) {
		obj = vp[i];
		// Skip words that cannot point into the heap's address range.
		if(obj == nil || (byte*)obj < mheap.min || (byte*)obj >= mheap.max)
			continue;
		// mlookup resolves a (possibly interior) pointer to the
		// block's base address, size, and reference word.
		if(mlookup(obj, &obj, &size, nil, &refp)) {
			ref = *refp;
			switch(ref & ~RefFlags) {
			case RefFinalize:
				// If marked for finalization already, some other finalization-ready
				// object has a pointer: turn off finalization until that object is gone.
				// This means that cyclic finalizer loops never get collected,
				// so don't do that.
				/* fall through */
			case RefNone:
				if(Debug > 1)
					printf("%d found at %p: ", depth, &vp[i]);
				// Mark before recursing so pointer cycles terminate.
				*refp = RefSome | (ref & RefFlags);
				if(!(ref & RefNoPointers))
					scanblock(depth+1, obj, size);
				break;
			}
		}
	}
}
// scanstack marks everything reachable from goroutine gp's stack,
// walking the chain of stack segments from the active one downward.
static void
scanstack(G *gp)
{
	Stktop *stk;
	byte *sp;

	// The goroutine running the collector uses its live C stack
	// pointer; all others are stopped with sp saved in gp->sched.
	if(gp == g)
		sp = (byte*)&gp;
	else
		sp = gp->sched.sp;
	if(Debug > 1)
		printf("scanstack %d %p\n", gp->goid, sp);
	stk = (Stktop*)gp->stackbase;
	while(stk) {
		// Scan the in-use part of this segment, then follow the
		// saved sp/stackbase into the next segment.
		scanblock(0, sp, (byte*)stk - sp);
		sp = stk->gobuf.sp;
		stk = (Stktop*)stk->stackbase;
	}
}
|
|
|
// mark traces from the GC roots: the data+bss segments and every
// goroutine's stack.  The world must already be stopped.
static void
mark(void)
{
	G *gp;

	// mark data+bss.
	// skip mheap itself, which has no interesting pointers
	// and is mostly zeroed and would not otherwise be paged in.
	scanblock(0, data, (byte*)&mheap - data);
	scanblock(0, (byte*)(&mheap+1), end - (byte*)(&mheap+1));

	// mark stacks
	for(gp=allg; gp!=nil; gp=gp->alllink) {
		switch(gp->status){
		default:
			printf("unexpected G.status %d\n", gp->status);
			throw("mark - bad status");
		case Gdead:
			// No live stack to scan.
			break;
		case Grunning:
			// Only the goroutine running the collector may be
			// Grunning here; anything else means stoptheworld failed.
			if(gp != g)
				throw("mark - world not stopped");
			scanstack(gp);
			break;
		case Grunnable:
		case Gsyscall:
		case Gwaiting:
			scanstack(gp);
			break;
		}
	}
}
// pass 0: mark RefNone with finalizer as RefFinalize and trace
//
// A block that survived mark() as RefNone but carries RefHasFinalizer
// is unreachable except through its finalizer; flag it RefFinalize and
// scan its contents so everything it points at stays alive until the
// finalizer has run.
static void
sweepspan0(MSpan *s)
{
	byte *p;
	uint32 ref, *gcrefp, *gcrefep;
	int32 n, size, npages;

	p = (byte*)(s->start << PageShift);
	if(s->sizeclass == 0) {
		// Large block.
		ref = s->gcref0;
		// Matches exactly: state RefNone with RefHasFinalizer set
		// (the other flag bits are ignored by the mask).
		if((ref&~(RefFlags^RefHasFinalizer)) == (RefNone|RefHasFinalizer)) {
			// Mark as finalizable.
			s->gcref0 = RefFinalize | RefHasFinalizer | (ref&(RefFlags^RefHasFinalizer));
			if(!(ref & RefNoPointers))
				scanblock(100, p, s->npages<<PageShift);
		}
		return;
	}

	// Chunk full of small blocks.
	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
	gcrefp = s->gcref;
	gcrefep = s->gcref + n;
	for(; gcrefp < gcrefep; gcrefp++) {
		ref = *gcrefp;
		if((ref&~(RefFlags^RefHasFinalizer)) == (RefNone|RefHasFinalizer)) {
			// Mark as finalizable.
			*gcrefp = RefFinalize | RefHasFinalizer | (ref&(RefFlags^RefHasFinalizer));
			if(!(ref & RefNoPointers))
				scanblock(100, p+(gcrefp-s->gcref)*size, size);
		}
	}
}
// pass 1: free RefNone, queue RefFinalize, reset RefSome
static void
sweepspan1(MSpan *s)
{
	int32 n, npages, size;
	byte *p;
	uint32 ref, *gcrefp, *gcrefep;
	MCache *c;

	p = (byte*)(s->start << PageShift);
	if(s->sizeclass == 0) {
		// Large block.
		ref = s->gcref0;
		switch(ref & ~RefFlags) {
		case RefNone:
			// Free large object: return its pages to the heap.
			mstats.alloc -= s->npages<<PageShift;
			runtime_memclr(p, s->npages<<PageShift);
			if(ref & RefProfiled)
				MProf_Free(p, s->npages<<PageShift);
			s->gcref0 = RefFree;
			MHeap_Free(&mheap, s, 1);
			break;
		case RefFinalize:
			// Queue the finalizer.  If the queue is full, the block
			// keeps RefHasFinalizer and is retried next collection.
			if(pfinq < efinq) {
				pfinq->p = p;
				pfinq->nret = 0;
				pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
				ref &= ~RefHasFinalizer;
				if(pfinq->fn == nil)
					throw("finalizer inconsistency");
				pfinq++;
			}
			// fall through
		case RefSome:
			// Reachable (or just queued for finalization): reset the
			// mark to RefNone for the next collection cycle.
			s->gcref0 = RefNone | (ref&RefFlags);
			break;
		}
		return;
	}

	// Chunk full of small blocks.
	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
	gcrefp = s->gcref;
	gcrefep = s->gcref + n;
	for(; gcrefp < gcrefep; gcrefp++, p += size) {
		ref = *gcrefp;
		if(ref < RefNone)	// RefFree or RefStack
			continue;
		switch(ref & ~RefFlags) {
		case RefNone:
			// Free small object back to this M's cache.
			if(ref & RefProfiled)
				MProf_Free(p, size);
			*gcrefp = RefFree;
			c = m->mcache;
			if(size > sizeof(uintptr))
				((uintptr*)p)[1] = 1;	// mark as "needs to be zeroed"
			mstats.alloc -= size;
			mstats.by_size[s->sizeclass].nfree++;
			MCache_Free(c, p, s->sizeclass, size);
			break;
		case RefFinalize:
			// Queue the finalizer; same protocol as the large-block
			// case above, including the full-queue retry behavior.
			if(pfinq < efinq) {
				pfinq->p = p;
				pfinq->nret = 0;
				pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
				ref &= ~RefHasFinalizer;
				if(pfinq->fn == nil)
					throw("finalizer inconsistency");
				pfinq++;
			}
			// fall through
		case RefSome:
			*gcrefp = RefNone | (ref&RefFlags);
			break;
		}
	}
}
|
|
|
|
static void
|
|
|
|
sweep(void)
|
|
|
|
{
|
2009-01-28 16:22:16 -07:00
|
|
|
MSpan *s;
|
2009-01-26 18:37:05 -07:00
|
|
|
|
2010-02-03 17:31:34 -07:00
|
|
|
// Sweep all the spans marking blocks to be finalized.
|
2009-01-28 16:22:16 -07:00
|
|
|
for(s = mheap.allspans; s != nil; s = s->allnext)
|
2010-02-10 15:59:39 -07:00
|
|
|
if(s->state == MSpanInUse)
|
|
|
|
sweepspan0(s);
|
2010-02-10 01:00:12 -07:00
|
|
|
|
2010-02-03 17:31:34 -07:00
|
|
|
// Sweep again queueing finalizers and freeing the others.
|
|
|
|
for(s = mheap.allspans; s != nil; s = s->allnext)
|
2010-02-10 15:59:39 -07:00
|
|
|
if(s->state == MSpanInUse)
|
|
|
|
sweepspan1(s);
|
2009-01-26 18:37:05 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Semaphore, not Lock, so that the goroutine
// reschedules when there is contention rather
// than spinning.
static uint32 gcsema = 1;

// Initialized from $GOGC. GOGC=off means no gc.
//
// Next gc is after we've allocated an extra amount of
// memory proportional to the amount already in use.
// If gcpercent=100 and we're using 4M, we'll gc again
// when we get to 8M. This keeps the gc cost in linear
// proportion to the allocation cost. Adjusting gcpercent
// just changes the linear constant (and also the amount of
// extra memory used).
static int32 gcpercent = -2;	// -2 means $GOGC not read yet (see gc)
2010-03-08 15:15:44 -07:00
|
|
|
static void
|
|
|
|
stealcache(void)
|
|
|
|
{
|
|
|
|
M *m;
|
|
|
|
|
|
|
|
for(m=allm; m; m=m->alllink)
|
|
|
|
MCache_ReleaseAll(m->mcache);
|
|
|
|
}
|
|
|
|
|
// gc runs a stop-the-world mark-and-sweep collection if one is due
// (heap_alloc has reached next_gc) or if force is nonzero, then
// starts goroutines for any queued finalizers, updates pause
// statistics, and restarts the world.
void
gc(int32 force)
{
	int64 t0, t1;
	byte *p;
	Finq *fp;

	// The gc is turned off (via enablegc) until
	// the bootstrap has completed.
	// Also, malloc gets called in the guts
	// of a number of libraries that might be
	// holding locks. To avoid priority inversion
	// problems, don't bother trying to run gc
	// while holding a lock. The next mallocgc
	// without a lock will do the gc instead.
	if(!mstats.enablegc || m->locks > 0 || panicking)
		return;

	if(gcpercent == -2) {	// first time through
		// Read the growth ratio from $GOGC: default 100,
		// "off" disables collection entirely.
		p = getenv("GOGC");
		if(p == nil || p[0] == '\0')
			gcpercent = 100;
		else if(strcmp(p, (byte*)"off") == 0)
			gcpercent = -1;
		else
			gcpercent = atoi(p);
	}
	if(gcpercent < 0)
		return;

	semacquire(&gcsema);
	t0 = nanotime();
	m->gcing = 1;
	stoptheworld();
	// Sanity check: no one may hold the heap lock across a collection.
	if(mheap.Lock.key != 0)
		throw("mheap locked during gc");
	if(force || mstats.heap_alloc >= mstats.next_gc) {
		mark();
		sweep();
		stealcache();
		// Schedule the next collection proportionally to live data.
		mstats.next_gc = mstats.heap_alloc+mstats.heap_alloc*gcpercent/100;
	}
	m->gcing = 0;

	// kick off goroutines to run queued finalizers
	m->locks++;	// disable gc during the mallocs in newproc
	for(fp=finq; fp<pfinq; fp++) {
		newproc1((byte*)fp->fn, (byte*)&fp->p, sizeof(fp->p), fp->nret);
		fp->fn = nil;
		fp->p = nil;
	}
	pfinq = finq;	// queue drained; reset for the next cycle
	m->locks--;

	t1 = nanotime();
	mstats.numgc++;
	mstats.pause_ns += t1 - t0;
	if(mstats.debuggc)
		printf("pause %D\n", t1-t0);
	semrelease(&gcsema);
	starttheworld();
}