commit 4e28cfe970
parent 1ec91c8d1d

runtime: run all finalizers in a single goroutine.
eliminate second pass of mark+sweep
by scanning finalizer table specially.

R=r
CC=golang-dev
https://golang.org/cl/782041
@@ -31,7 +31,7 @@ func Caller(skip int) (pc uintptr, file string, line int, ok bool)
 // on the calling goroutine's stack. The argument skip is the number of stack frames
 // to skip before recording in pc, with 0 starting at the caller of Caller.
 // It returns the number of entries written to pc.
-func Callers(skip int, pc []int) int
+func Callers(skip int, pc []uintptr) int
 
 // FuncForPC returns a *Func describing the function that contains the
 // given program counter address, or else nil.
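Note: after this change a call site passes a []uintptr, matching the type of a program counter, instead of []int. A minimal sketch of the updated API (runtime.Callers and runtime.FuncForPC as declared in the tree; the buffer size here is arbitrary):

package main

import "runtime"

func main() {
	pc := make([]uintptr, 10)
	n := runtime.Callers(0, pc) // returns the number of entries written to pc
	for _, p := range pc[:n] {
		if f := runtime.FuncForPC(p); f != nil {
			println(f.Name())
		}
	}
}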
@@ -208,6 +208,10 @@ func GC()
 // to depend on a finalizer to flush an in-memory I/O buffer such as a
 // bufio.Writer, because the buffer would not be flushed at program exit.
 //
+// A single goroutine runs all finalizers for a program, sequentially.
+// If a finalizer must run for a long time, it should do so by starting
+// a new goroutine.
+//
 // TODO(rsc): make os.File use SetFinalizer
 // TODO(rsc): allow f to have (ignored) return values
 //
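The added doc comment is the user-visible contract: one goroutine runs every finalizer in the program, one after another, so a slow finalizer delays all the rest. A minimal sketch of the recommended pattern (the resource type and timings are invented for illustration; finalizers are not guaranteed to run, hence the sleeps):

package main

import (
	"runtime"
	"time"
)

type resource struct{ id int }

func main() {
	r := &resource{id: 1}
	runtime.SetFinalizer(r, func(r *resource) {
		// Long-running cleanup should not block the shared
		// finalizer goroutine; hand it off instead.
		go func() {
			time.Sleep(100 * time.Millisecond) // stand-in for slow work
			println("released", r.id)
		}()
	})
	r = nil
	runtime.GC()
	time.Sleep(time.Second) // finalizers run asynchronously; give them a chance
}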
@@ -366,7 +366,7 @@ func SetFinalizer(obj Eface, finalizer Eface) {
 	}
 	nret = (nret + sizeof(void*)-1) & ~(sizeof(void*)-1);
 
-	if(getfinalizer(obj.data, 0, nil)) {
+	if(getfinalizer(obj.data, 0)) {
 		printf("runtime.SetFinalizer: finalizer already set");
 		goto throw;
 	}
@@ -330,8 +330,6 @@ void*	SysAlloc(uintptr);
 void	SysUnused(void*, uintptr);
 void	SysFree(void*, uintptr);
 
-void*	getfinalizer(void*, bool, int32*);
-
 enum
 {
 	RefcountOverhead = 4,	// one uint32 per object
@@ -340,7 +338,6 @@ enum
 	RefStack,		// stack segment - don't free and don't scan for pointers
 	RefNone,		// no references
 	RefSome,		// some references
-	RefFinalize,	// ready to be finalized
 	RefNoPointers = 0x80000000U,	// flag - no pointers here
 	RefHasFinalizer = 0x40000000U,	// flag - has finalizer
 	RefProfiled = 0x20000000U,	// flag - is in profiling table
@@ -359,3 +356,14 @@ enum {
 	MProf_All = 2,
 };
 extern int32 malloc_profile;
+
+typedef struct Finalizer Finalizer;
+struct Finalizer
+{
+	Finalizer *next;	// for use by caller of getfinalizer
+	void (*fn)(void*);
+	void *arg;
+	int32 nret;
+};
+
+Finalizer*	getfinalizer(void*, bool);
@@ -18,17 +18,14 @@ typedef struct Fintab Fintab;
 struct Fintab
 {
 	void **key;
-	struct {
-		void *fn;
-		int32 nret;
-	} *val;
+	Finalizer **val;
 	int32 nkey;	// number of non-nil entries in key
 	int32 ndead;	// number of dead (-1) entries in key
 	int32 max;	// size of key, val allocations
 };
 
 static void
-addfintab(Fintab *t, void *k, void *fn, int32 nret)
+addfintab(Fintab *t, void *k, Finalizer *v)
 {
 	int32 i, j;
 
@@ -51,15 +48,14 @@ addfintab(Fintab *t, void *k, Finalizer *v)
 
 ret:
 	t->key[i] = k;
-	t->val[i].fn = fn;
-	t->val[i].nret = nret;
+	t->val[i] = v;
 }
 
-static void*
-lookfintab(Fintab *t, void *k, bool del, int32 *nret)
+static Finalizer*
+lookfintab(Fintab *t, void *k, bool del)
 {
 	int32 i, j;
-	void *v;
+	Finalizer *v;
 
 	if(t->max == 0)
 		return nil;
@@ -68,13 +64,10 @@ lookfintab(Fintab *t, void *k, bool del)
 		if(t->key[i] == nil)
 			return nil;
 		if(t->key[i] == k) {
-			v = t->val[i].fn;
-			if(nret)
-				*nret = t->val[i].nret;
+			v = t->val[i];
 			if(del) {
 				t->key[i] = (void*)-1;
-				t->val[i].fn = nil;
-				t->val[i].nret = 0;
+				t->val[i] = nil;
 				t->ndead++;
 			}
 			return v;
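Fintab keeps its open-addressing layout; only the value type changes, from an inline {fn, nret} pair to a single *Finalizer, so insertion and deletion become one pointer store each. A rough Go rendering of the lookup-with-tombstones idea (a sketch with invented names, not the runtime's code):

package main

type finalizer struct {
	next *finalizer
	fn   func(uintptr)
	arg  uintptr
	nret int32
}

const deadKey = ^uintptr(0) // tombstone, playing the role of (void*)-1

type fintab struct {
	key []uintptr // 0 = never used, deadKey = deleted
	val []*finalizer
}

// lookup mirrors lookfintab: an empty slot proves the key is absent,
// while tombstones are probed past so existing chains stay intact.
func (t *fintab) lookup(k uintptr, del bool) *finalizer {
	if len(t.key) == 0 {
		return nil
	}
	i := int(k % uintptr(len(t.key)))
	for range t.key {
		switch t.key[i] {
		case 0:
			return nil
		case k:
			v := t.val[i]
			if del {
				t.key[i] = deadKey // leave a tombstone, not an empty slot
				t.val[i] = nil
			}
			return v
		}
		if i++; i == len(t.key) {
			i = 0
		}
	}
	return nil
}

func main() {
	t := &fintab{key: make([]uintptr, 8), val: make([]*finalizer, 8)}
	t.key[0], t.val[0] = 0x1000, &finalizer{nret: 8} // 0x1000 % 8 == 0
	f := t.lookup(0x1000, true)                      // fetch and delete in one probe
	println(f != nil, t.key[0] == deadKey)
}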
@@ -98,6 +91,14 @@ addfinalizer(void *p, void (*f)(void*), int32 nret)
 	int32 i;
 	uint32 *ref;
 	byte *base;
+	Finalizer *e;
+
+	e = nil;
+	if(f != nil) {
+		e = mal(sizeof *e);
+		e->fn = f;
+		e->nret = nret;
+	}
 
 	lock(&finlock);
 	if(!mlookup(p, &base, nil, nil, &ref) || p != base) {
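addfinalizer now allocates the Finalizer record up front, while the mutator is still running normally; the collector later only has to unlink it from the table and thread it onto finq (presumably the point: the sweep phase should not itself be allocating). A toy Go model of the allocate-early, queue-later split (all names invented for illustration):

package main

type finalizerNode struct {
	next *finalizerNode
	fn   func(uintptr)
	arg  uintptr
	nret int32
}

var (
	table = map[uintptr]*finalizerNode{} // stand-in for Fintab
	finq  *finalizerNode                 // list drained by the finalizer goroutine
)

// register allocates the node immediately, outside the collector.
func register(p uintptr, fn func(uintptr), nret int32) {
	table[p] = &finalizerNode{fn: fn, nret: nret}
}

// queueDuringSweep allocates nothing: it moves the existing node
// from the table onto the finq list.
func queueDuringSweep(p uintptr) {
	f := table[p]
	if f == nil {
		panic("finalizer inconsistency")
	}
	delete(table, p)
	f.arg = p
	f.next = finq
	finq = f
}

func main() {
	register(0x1000, func(p uintptr) { println("finalize", p) }, 0)
	queueDuringSweep(0x1000)
	for f := finq; f != nil; f = f.next {
		f.fn(f.arg)
	}
}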
@@ -106,7 +107,7 @@ addfinalizer(void *p, void (*f)(void*), int32 nret)
 	}
 	if(f == nil) {
 		if(*ref & RefHasFinalizer) {
-			lookfintab(&fintab, p, 1, nil);
+			lookfintab(&fintab, p, 1);
 			*ref &= ~RefHasFinalizer;
 		}
 		unlock(&finlock);
@@ -141,26 +142,41 @@ addfinalizer(void *p, void (*f)(void*), int32 nret)
 
 			k = fintab.key[i];
 			if(k != nil && k != (void*)-1)
-				addfintab(&newtab, k, fintab.val[i].fn, fintab.val[i].nret);
+				addfintab(&newtab, k, fintab.val[i]);
 		}
 		free(fintab.key);
 		free(fintab.val);
 		fintab = newtab;
 	}
 
-	addfintab(&fintab, p, f, nret);
+	addfintab(&fintab, p, e);
 	unlock(&finlock);
 }
 
 // get finalizer; if del, delete finalizer.
 // caller is responsible for updating RefHasFinalizer bit.
-void*
-getfinalizer(void *p, bool del, int32 *nret)
+Finalizer*
+getfinalizer(void *p, bool del)
 {
-	void *f;
+	Finalizer *f;
 
 	lock(&finlock);
-	f = lookfintab(&fintab, p, del, nret);
+	f = lookfintab(&fintab, p, del);
 	unlock(&finlock);
 	return f;
 }
+
+void
+walkfintab(void (*fn)(void*))
+{
+	void **key;
+	void **ekey;
+
+	lock(&finlock);
+	key = fintab.key;
+	ekey = key + fintab.max;
+	for(; key < ekey; key++)
+		if(*key != nil && *key != ((void*)-1))
+			fn(*key);
+	unlock(&finlock);
+}
@@ -23,19 +23,10 @@ extern byte data[];
 extern byte etext[];
 extern byte end[];
 
-typedef struct Finq Finq;
-struct Finq
-{
-	void (*fn)(void*);
-	void *p;
-	int32 nret;
-};
-
-static Finq finq[128];	// finalizer queue - two elements per entry
-static Finq *pfinq = finq;
-static Finq *efinq = finq+nelem(finq);
+static G *fing;
+static Finalizer *finq;
 
 static void sweepblock(byte*, int64, uint32*, int32);
+static void runfinq(void);
 
 enum {
 	PtrSize = sizeof(void*)
@@ -68,12 +59,6 @@ scanblock(int32 depth, byte *b, int64 n)
 			if(mlookup(obj, &obj, &size, nil, &refp)) {
 				ref = *refp;
 				switch(ref & ~RefFlags) {
-				case RefFinalize:
-					// If marked for finalization already, some other finalization-ready
-					// object has a pointer: turn off finalization until that object is gone.
-					// This means that cyclic finalizer loops never get collected,
-					// so don't do that.
-					/* fall through */
 				case RefNone:
 					if(Debug > 1)
 						printf("%d found at %p: ", depth, &vp[i]);
@@ -106,6 +91,21 @@ scanstack(G *gp)
 	}
 }
 
+static void
+markfin(void *v)
+{
+	uintptr size;
+	uint32 *refp;
+
+	size = 0;
+	refp = nil;
+	if(!mlookup(v, &v, &size, nil, &refp) || !(*refp & RefHasFinalizer))
+		throw("mark - finalizer inconsistency");
+
+	// do not mark the finalizer block itself.  just mark the things it points at.
+	scanblock(1, v, size);
+}
+
 static void
 mark(void)
 {
@@ -137,58 +137,26 @@ mark(void)
 			break;
 		}
 	}
+
+	// mark things pointed at by objects with finalizers
+	walkfintab(markfin);
 }
 
-// pass 0: mark RefNone with finalizer as RefFinalize and trace
+// free RefNone, free & queue finalizers for RefNone|RefHasFinalizer, reset RefSome
 static void
-sweepspan0(MSpan *s)
-{
-	byte *p;
-	uint32 ref, *gcrefp, *gcrefep;
-	int32 n, size, npages;
-
-	p = (byte*)(s->start << PageShift);
-	if(s->sizeclass == 0) {
-		// Large block.
-		ref = s->gcref0;
-		if((ref&~(RefFlags^RefHasFinalizer)) == (RefNone|RefHasFinalizer)) {
-			// Mark as finalizable.
-			s->gcref0 = RefFinalize | RefHasFinalizer | (ref&(RefFlags^RefHasFinalizer));
-			if(!(ref & RefNoPointers))
-				scanblock(100, p, s->npages<<PageShift);
-		}
-		return;
-	}
-
-	// Chunk full of small blocks.
-	MGetSizeClassInfo(s->sizeclass, &size, &npages, &n);
-	gcrefp = s->gcref;
-	gcrefep = s->gcref + n;
-	for(; gcrefp < gcrefep; gcrefp++) {
-		ref = *gcrefp;
-		if((ref&~(RefFlags^RefHasFinalizer)) == (RefNone|RefHasFinalizer)) {
-			// Mark as finalizable.
-			*gcrefp = RefFinalize | RefHasFinalizer | (ref&(RefFlags^RefHasFinalizer));
-			if(!(ref & RefNoPointers))
-				scanblock(100, p+(gcrefp-s->gcref)*size, size);
-		}
-	}
-}
-
-// pass 1: free RefNone, queue RefFinalize, reset RefSome
-static void
-sweepspan1(MSpan *s)
+sweepspan(MSpan *s)
 {
 	int32 n, npages, size;
 	byte *p;
 	uint32 ref, *gcrefp, *gcrefep;
 	MCache *c;
+	Finalizer *f;
 
 	p = (byte*)(s->start << PageShift);
 	if(s->sizeclass == 0) {
 		// Large block.
 		ref = s->gcref0;
-		switch(ref & ~RefFlags) {
+		switch(ref & ~(RefFlags^RefHasFinalizer)) {
 		case RefNone:
 			// Free large object.
 			mstats.alloc -= s->npages<<PageShift;
@@ -198,18 +166,17 @@ sweepspan1(MSpan *s)
 			s->gcref0 = RefFree;
 			MHeap_Free(&mheap, s, 1);
 			break;
-		case RefFinalize:
-			if(pfinq < efinq) {
-				pfinq->p = p;
-				pfinq->nret = 0;
-				pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
-				ref &= ~RefHasFinalizer;
-				if(pfinq->fn == nil)
-					throw("finalizer inconsistency");
-				pfinq++;
-			}
+		case RefNone|RefHasFinalizer:
+			f = getfinalizer(p, 1);
+			if(f == nil)
+				throw("finalizer inconsistency");
+			f->arg = p;
+			f->next = finq;
+			finq = f;
+			ref &= ~RefHasFinalizer;
 			// fall through
 		case RefSome:
+		case RefSome|RefHasFinalizer:
 			s->gcref0 = RefNone | (ref&RefFlags);
 			break;
 		}
@@ -224,7 +191,7 @@ sweepspan1(MSpan *s)
 		ref = *gcrefp;
 		if(ref < RefNone)	// RefFree or RefStack
 			continue;
-		switch(ref & ~RefFlags) {
+		switch(ref & ~(RefFlags^RefHasFinalizer)) {
 		case RefNone:
 			// Free small object.
 			if(ref & RefProfiled)
@@ -237,18 +204,17 @@ sweepspan1(MSpan *s)
 			mstats.by_size[s->sizeclass].nfree++;
 			MCache_Free(c, p, s->sizeclass, size);
 			break;
-		case RefFinalize:
-			if(pfinq < efinq) {
-				pfinq->p = p;
-				pfinq->nret = 0;
-				pfinq->fn = getfinalizer(p, 1, &pfinq->nret);
-				ref &= ~RefHasFinalizer;
-				if(pfinq->fn == nil)
-					throw("finalizer inconsistency");
-				pfinq++;
-			}
+		case RefNone|RefHasFinalizer:
+			f = getfinalizer(p, 1);
+			if(f == nil)
+				throw("finalizer inconsistency");
+			f->arg = p;
+			f->next = finq;
+			finq = f;
+			ref &= ~RefHasFinalizer;
 			// fall through
 		case RefSome:
+		case RefSome|RefHasFinalizer:
 			*gcrefp = RefNone | (ref&RefFlags);
 			break;
 		}
@@ -260,15 +226,9 @@ sweep(void)
 {
 	MSpan *s;
 
-	// Sweep all the spans marking blocks to be finalized.
 	for(s = mheap.allspans; s != nil; s = s->allnext)
 		if(s->state == MSpanInUse)
-			sweepspan0(s);
-
-	// Sweep again queueing finalizers and freeing the others.
-	for(s = mheap.allspans; s != nil; s = s->allnext)
-		if(s->state == MSpanInUse)
-			sweepspan1(s);
+			sweepspan(s);
 }
 
 // Semaphore, not Lock, so that the goroutine
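This is the core of the CL: the old two-pass sweep (sweepspan0 marked RefNone|RefHasFinalizer blocks as RefFinalize and traced them; sweepspan1 freed and queued) collapses into one pass, because mark() now ends by walking the finalizer table and marking everything a finalizable object points at, while leaving the object itself unmarked. At sweep time, an object that is still RefNone but carries RefHasFinalizer is therefore known to be unreachable, yet everything it references has been kept alive for the finalizer to use. A toy Go model of that invariant (assumed types, not runtime code):

package main

type object struct {
	marked bool
	refs   []*object
}

func scan(o *object) {
	if o == nil || o.marked {
		return
	}
	o.marked = true
	for _, c := range o.refs {
		scan(c)
	}
}

// markPhase traces from the roots, then walks the finalizer table,
// marking what each finalizable object points at - but not the
// object itself. An object left unmarked yet present in fintab is
// exactly the RefNone|RefHasFinalizer case the single sweep pass
// queues for finalization.
func markPhase(roots []*object, fintab map[*object]func()) {
	for _, r := range roots {
		scan(r)
	}
	for obj := range fintab {
		for _, child := range obj.refs {
			scan(child)
		}
	}
}

func main() {
	buf := &object{}
	file := &object{refs: []*object{buf}}
	fintab := map[*object]func(){file: func() { println("finalize file") }}
	markPhase(nil, fintab)
	// buf survives for the finalizer's benefit; file itself is collectible.
	println("buf marked:", buf.marked, "file marked:", file.marked)
}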
@@ -301,7 +261,7 @@ gc(int32 force)
 {
 	int64 t0, t1;
 	byte *p;
-	Finq *fp;
+	Finalizer *fp;
 
 	// The gc is turned off (via enablegc) until
 	// the bootstrap has completed.
@@ -340,14 +300,15 @@ gc(int32 force)
 	}
 	m->gcing = 0;
 
-	// kick off goroutines to run queued finalizers
 	m->locks++;	// disable gc during the mallocs in newproc
-	for(fp=finq; fp<pfinq; fp++) {
-		newproc1((byte*)fp->fn, (byte*)&fp->p, sizeof(fp->p), fp->nret);
-		fp->fn = nil;
-		fp->p = nil;
+	fp = finq;
+	if(fp != nil) {
+		// kick off or wake up goroutine to run queued finalizers
+		if(fing == nil)
+			fing = newproc1((byte*)runfinq, nil, 0, 0);
+		else if(fing->status == Gwaiting)
+			ready(fing);
 	}
-	pfinq = finq;
 	m->locks--;
 
 	t1 = nanotime();
@@ -357,4 +318,42 @@ gc(int32 force)
 		printf("pause %D\n", t1-t0);
 	semrelease(&gcsema);
 	starttheworld();
 
+	// give the queued finalizers, if any, a chance to run
+	if(fp != nil)
+		gosched();
+}
+
+static void
+runfinq(void)
+{
+	Finalizer *f, *next;
+	byte *frame;
+
+	for(;;) {
+		// There's no need for a lock in this section
+		// because it only conflicts with the garbage
+		// collector, and the garbage collector only
+		// runs when everyone else is stopped, and
+		// runfinq only stops at the gosched() or
+		// during the calls in the for loop.
+		f = finq;
+		finq = nil;
+		if(f == nil) {
+			g->status = Gwaiting;
+			gosched();
+			continue;
+		}
+		for(; f; f=next) {
+			next = f->next;
+			frame = mal(sizeof(uintptr) + f->nret);
+			*(void**)frame = f->arg;
+			reflect·call((byte*)f->fn, frame, sizeof(uintptr) + f->nret);
+			free(frame);
+			f->fn = nil;
+			f->arg = nil;
+			f->next = nil;
+		}
+		gc(1);	// trigger another gc to clean up the finalized objects, if possible
+	}
 }
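runfinq is a single long-lived goroutine: it snapshots and empties finq, runs each finalizer via reflect·call (the frame holds the argument word followed by f->nret bytes of ignored return space), then parks itself in Gwaiting until gc() readies it again. The same drain-then-park shape, sketched in user-level Go with a condition variable (an analogy only; the runtime uses its own scheduler primitives):

package main

import "sync"

type work struct {
	next *work
	fn   func()
}

var (
	mu    sync.Mutex
	cond  = sync.NewCond(&mu)
	queue *work
)

// enqueue pushes one item and wakes the worker,
// like ready(fing) when fing is in Gwaiting.
func enqueue(fn func()) {
	mu.Lock()
	queue = &work{next: queue, fn: fn}
	mu.Unlock()
	cond.Signal()
}

// worker drains the whole list each time it wakes,
// like runfinq's f = finq; finq = nil.
func worker() {
	for {
		mu.Lock()
		for queue == nil {
			cond.Wait() // like g->status = Gwaiting; gosched()
		}
		w := queue
		queue = nil
		mu.Unlock()
		for ; w != nil; w = w.next {
			w.fn() // run queued items sequentially
		}
	}
}

func main() {
	go worker()
	done := make(chan bool)
	enqueue(func() { println("finalized"); done <- true })
	<-done
}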
@@ -775,7 +775,7 @@ void
 	newproc1(fn, (byte*)(&fn+1), siz, 0);
 }
 
-void
+G*
 newproc1(byte *fn, byte *argp, int32 narg, int32 nret)
 {
 	byte *sp;
@@ -815,6 +815,7 @@ newproc1(byte *fn, byte *argp, int32 narg, int32 nret)
 	newprocreadylocked(newg);
 	unlock(&sched);
 
+	return newg;
 //printf(" goid=%d\n", newg->goid);
 }
 
@@ -396,6 +396,8 @@ uint32	noequal(uint32, void*, void*);
 void*	malloc(uintptr size);
 void	free(void *v);
 void	addfinalizer(void*, void(*fn)(void*), int32);
+void	walkfintab(void (*fn)(void*));
+
 void	exit(int32);
 void	breakpoint(void);
 void	gosched(void);
@@ -403,7 +405,7 @@ void	goexit(void);
 void	runcgo(void (*fn)(void*), void*);
 void	·entersyscall(void);
 void	·exitsyscall(void);
-void	newproc1(byte*, byte*, int32, int32);
+G*	newproc1(byte*, byte*, int32, int32);
 void	siginit(void);
 bool	sigsend(int32 sig);
 void	gettime(int64*, int32*);
@@ -508,6 +510,7 @@ void	runtime_printhex(uint64);
 void	runtime_printslice(Slice);
 void	runtime_printcomplex(Complex128);
 void	·panicl(int32);
+void	reflect·call(byte*, byte*, uint32);
 
 /*
  * wrapped for go users
@@ -8,7 +8,10 @@
 
 package main
 
-import "runtime"
+import (
+	"runtime"
+	"time"
+)
 
 const N = 250
 
@@ -25,12 +28,14 @@ var i int
 var nfinal int
 var final [N]int
 
-func finalA(a *A) {
+// the unused return is to test finalizers with return values
+func finalA(a *A) (unused [N]int) {
 	if final[a.n] != 0 {
 		println("finalA", a.n, final[a.n])
 		panic("fail")
 	}
 	final[a.n] = 1
+	return
 }
 
 func finalB(b *B) {
@@ -53,6 +58,7 @@ func main() {
 	for i := 0; i < N; i++ {
 		runtime.GC()
 		runtime.Gosched()
+		time.Sleep(1e6);
 	}
 	if nfinal < N*8/10 {
 		panic("not enough finalizing:", nfinal, "/", N)