Mirror of https://github.com/golang/go
gc #0.  mark and sweep collector.

R=r,gri
DELTA=472  (423 added, 2 deleted, 47 changed)
OCL=23522
CL=23541

commit 1ce17918e3 (parent 5b129cda5f)
@@ -11,9 +11,14 @@ package malloc
 type Stats struct {
 	Alloc uint64;
 	Sys uint64;
-};
+	Stacks uint64;
+	InusePages uint64;
+	NextGC uint64;
+	EnableGC bool;
+}
 
-func Alloc(uint64) *byte;
-func Free(*byte);
-func GetStats() *Stats;
-func Lookup(*byte) (*byte, uintptr);
+func Alloc(uint64) *byte
+func Free(*byte)
+func GetStats() *Stats
+func Lookup(*byte) (*byte, uintptr)
+func GC()

@@ -29,6 +29,7 @@ LIBOFILES=\
 	mcentral.$O\
 	mem.$O\
 	mfixalloc.$O\
+	mgc0.$O\
 	mheap.$O\
 	msize.$O\
 	print.$O\

@@ -24,6 +24,7 @@ malloc(uintptr size)
 	uintptr npages;
 	MSpan *s;
 	void *v;
+	uint32 *ref;
 
 	if(m->mallocing)
 		throw("malloc - deadlock");

@@ -55,10 +56,25 @@ malloc(uintptr size)
 		v = (void*)(s->start << PageShift);
 	}
 
+	// setup for mark sweep
+	mlookup(v, nil, nil, &ref);
+	*ref = RefNone;
+
 	m->mallocing = 0;
 	return v;
 }
 
+void*
+mallocgc(uintptr size)
+{
+	void *v;
+
+	v = malloc(size);
+	if(mstats.inuse_pages > mstats.next_gc)
+		gc(0);
+	return v;
+}
+
 // Free the object whose base pointer is v.
 void
 free(void *v)

@@ -67,10 +83,14 @@ free(void *v)
 	uintptr page, tmp;
 	MSpan *s;
 	MCache *c;
+	uint32 *ref;
 
 	if(v == nil)
 		return;
 
+	mlookup(v, nil, nil, &ref);
+	*ref = RefFree;
+
 	// Find size class for v.
 	page = (uintptr)v >> PageShift;
 	sizeclass = MHeapMapCache_GET(&mheap.mapcache, page, tmp);

@@ -98,32 +118,51 @@ free(void *v)
 	MCache_Free(c, v, sizeclass, size);
 }
 
-void
-mlookup(void *v, byte **base, uintptr *size)
+int32
+mlookup(void *v, byte **base, uintptr *size, uint32 **ref)
 {
-	uintptr n, off;
+	uintptr n, i;
 	byte *p;
 	MSpan *s;
 
-	s = MHeap_Lookup(&mheap, (uintptr)v>>PageShift);
+	s = MHeap_LookupMaybe(&mheap, (uintptr)v>>PageShift);
 	if(s == nil) {
-		*base = nil;
-		*size = 0;
-		return;
+		if(base)
+			*base = nil;
+		if(size)
+			*size = 0;
+		if(ref)
+			*ref = 0;
+		return 0;
 	}
 
 	p = (byte*)((uintptr)s->start<<PageShift);
 	if(s->sizeclass == 0) {
 		// Large object.
-		*base = p;
-		*size = s->npages<<PageShift;
-		return;
+		if(base)
+			*base = p;
+		if(size)
+			*size = s->npages<<PageShift;
+		if(ref)
+			*ref = &s->gcref0;
+		return 1;
 	}
 
 	n = class_to_size[s->sizeclass];
-	off = ((byte*)v - p)/n * n;
-	*base = p+off;
-	*size = n;
+	i = ((byte*)v - p)/n;
+	if(base)
+		*base = p + i*n;
+	if(size)
+		*size = n;
+	if((byte*)s->gcref < p || (byte*)s->gcref >= p+(s->npages<<PageShift)) {
+		printf("s->base sizeclass %d %p gcref %p block %D\n",
+			s->sizeclass, p, s->gcref, s->npages<<PageShift);
+		throw("bad gcref");
+	}
+	if(ref)
+		*ref = &s->gcref[i];
+
+	return 1;
 }
 
 MCache*

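The reworked mlookup above is the linchpin of the collector: every out parameter is now optional, and the int32 return reports whether v points into the heap at all, which is what lets the conservative scanner in mgc0.c probe arbitrary stack and data words. A minimal sketch of the three call shapes this commit uses, with a stub standing in for the real function (the stub's behavior is an assumption for illustration; the real mlookup consults the heap's span map):

	/* Sketch: the three mlookup call shapes used in this commit. */
	#include <stdio.h>
	#include <stdint.h>
	#include <stddef.h>

	typedef uint8_t byte;

	static uint32_t refword = 3;	/* stands in for a RefNone slot */

	static int32_t
	stub_mlookup(void *v, byte **base, uintptr_t *size, uint32_t **ref)
	{
		if(v == NULL)
			return 0;	/* not a heap pointer */
		if(base) *base = (byte*)v;	/* stub: pretend v is a block base */
		if(size) *size = 64;
		if(ref)  *ref = &refword;
		return 1;
	}

	int
	main(void)
	{
		int x;
		byte *base;
		uintptr_t size;
		uint32_t *ref;

		stub_mlookup(&x, NULL, NULL, &ref);	/* malloc/free: refcount word only */
		stub_mlookup(&x, &base, &size, NULL);	/* malloc.Lookup: base+size, no ref */
		if(stub_mlookup(&x, &base, &size, &ref))	/* scanblock: everything; the return
							   value filters out non-heap words */
			printf("base=%p size=%zu ref=%u\n", (void*)base, size, *ref);
		return 0;
	}
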
@@ -193,7 +232,7 @@ mal(uint32 n)
 	//return oldmal(n);
 	void *v;
 
-	v = malloc(n);
+	v = mallocgc(n);
 
 	if(0) {
 		byte *p;

@@ -227,6 +266,7 @@ void*
 stackalloc(uint32 n)
 {
 	void *v;
+	uint32 *ref;
 
 	//return oldmal(n);
 	if(m->mallocing) {

@@ -241,7 +281,10 @@ stackalloc(uint32 n)
 		unlock(&stacks);
 		return v;
 	}
-	return malloc(n);
+	v = malloc(n);
+	mlookup(v, nil, nil, &ref);
+	*ref = RefStack;
+	return v;
 }
 
 void

@@ -91,7 +91,7 @@ typedef uintptr PageID; // address >> PageShift
 enum
 {
 	// Tunable constants.
-	NumSizeClasses = 133,	// Number of size classes (must match msize.c)
+	NumSizeClasses = 150,	// Number of size classes (must match msize.c)
 	MaxSmallSize = 32<<10,
 
 	FixAllocChunk = 128<<10,	// Chunk size for FixAlloc

@@ -152,6 +152,9 @@ struct MStats
 	uint64 alloc;
 	uint64 sys;
 	uint64 stacks;
+	uint64 inuse_pages;	// protected by mheap.Lock
+	uint64 next_gc;	// protected by mheap.Lock
+	bool enablegc;
 };
 extern MStats mstats;
 

@@ -212,6 +215,10 @@ struct MSpan
 	uint32 ref;	// number of allocated objects in this span
 	uint32 sizeclass;	// size class
 	uint32 state;	// MSpanInUse or MSpanFree
+	union {
+		uint32 *gcref;	// sizeclass > 0
+		uint32 gcref0;	// sizeclass == 0
+	};
 };
 
 void MSpan_Init(MSpan *span, PageID start, uintptr npages);

@@ -292,6 +299,7 @@ struct MHeapMapNode3
 void MHeapMap_Init(MHeapMap *m, void *(*allocator)(uintptr));
 bool MHeapMap_Preallocate(MHeapMap *m, PageID k, uintptr npages);
 MSpan* MHeapMap_Get(MHeapMap *m, PageID k);
+MSpan* MHeapMap_GetMaybe(MHeapMap *m, PageID k);
 void MHeapMap_Set(MHeapMap *m, PageID k, MSpan *v);
 
 

@@ -364,7 +372,19 @@ void MHeap_Init(MHeap *h, void *(*allocator)(uintptr));
 MSpan* MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass);
 void MHeap_Free(MHeap *h, MSpan *s);
 MSpan* MHeap_Lookup(MHeap *h, PageID p);
+MSpan* MHeap_LookupMaybe(MHeap *h, PageID p);
+
+int32 mlookup(void *v, byte **base, uintptr *size, uint32 **ref);
+void gc(int32 force);
+
+enum
+{
+	RefcountOverhead = 4,	// one uint32 per object
+
+	RefFree = 0,	// must be zero
+	RefManual,	// manual allocation - don't free
+	RefStack,	// stack segment - don't free and don't scan for pointers
+	RefNone,	// no references
+	RefSome,	// some references
+};
 
-void* malloc(uintptr size);
-void free(void *v);
-void mlookup(void *v, byte **base, uintptr *size);

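The five Ref states double as a one-word-per-object mark bit: malloc stamps RefNone, the mark phase promotes reachable RefNone words to RefSome, and the sweep phase frees what is still RefNone while re-arming RefSome for the next cycle (RefManual and RefStack blocks are never freed). One collector cycle over a single refcount word, as a standalone sketch independent of the runtime:

	#include <stdio.h>

	enum { RefFree = 0, RefManual, RefStack, RefNone, RefSome };

	int
	main(void)
	{
		int ref = RefNone;	/* malloc stamps new blocks RefNone */

		if(ref == RefNone)	/* mark: a pointer to the block was found */
			ref = RefSome;

		if(ref == RefSome)	/* sweep: survivors are re-armed;       */
			ref = RefNone;	/* blocks still RefNone would be freed  */

		printf("after one cycle: %d (RefNone: live, awaiting next mark)\n", ref);
		return 0;
	}
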
@@ -15,9 +15,14 @@ func Free(p *byte) {
 }
 
 func Lookup(p *byte) (base *byte, size uintptr) {
-	mlookup(p, &base, &size);
+	mlookup(p, &base, &size, nil);
 }
 
 func GetStats() (s *MStats) {
 	s = &mstats;
 }
+
+func GC() {
+	gc(1);
+}
+

@@ -157,9 +157,9 @@ MCentral_Free(MCentral *c, void *v)
 static bool
 MCentral_Grow(MCentral *c)
 {
-	int32 n, npages, size;
+	int32 i, n, npages, size;
 	MLink **tailp, *v;
-	byte *p, *end;
+	byte *p;
 	MSpan *s;
 
 	unlock(c);

@@ -174,14 +174,14 @@ MCentral_Grow(MCentral *c)
 	// Carve span into sequence of blocks.
 	tailp = &s->freelist;
 	p = (byte*)(s->start << PageShift);
-	end = p + (npages << PageShift);
 	size = class_to_size[c->sizeclass];
-	n = 0;
-	for(; p + size <= end; p += size) {
+	n = (npages << PageShift) / (size + RefcountOverhead);
+	s->gcref = (uint32*)(p + size*n);
+	for(i=0; i<n; i++) {
 		v = (MLink*)p;
 		*tailp = v;
 		tailp = &v->next;
-		n++;
+		p += size;
 	}
 	*tailp = nil;
 

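The rewritten carve loop fixes the span layout that sweepspan later depends on: n blocks of size bytes, then n uint32 refcount words starting at p + size*n. Worked numbers, assuming the usual 4K page (PageShift = 12) and a 32-byte size class:

	n      = 4096 / (32 + 4) = 113 blocks
	blocks:  p        .. p + 113*32  (3616 bytes)
	gcref:   p + 3616 .. p + 4068    (113 uint32 words)
	slack  = 4096 - 4068 = 28 bytes

sweepspan recomputes the same n from class_to_size and class_to_allocnpages, which is why its comment insists the two computations must match.
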
src/runtime/mgc0.c (new file, 246 lines)
@@ -0,0 +1,246 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector -- step 0.
+//
+// Stop the world, mark and sweep garbage collector.
+// NOT INTENDED FOR PRODUCTION USE.
+//
+// A mark and sweep collector provides a way to exercise
+// and test the memory allocator and the stack walking machinery
+// without also needing to get reference counting
+// exactly right.
+
+#include "runtime.h"
+#include "malloc.h"
+
+enum {
+	Debug = 0
+};
+
+extern byte etext[];
+extern byte end[];
+
+static void
+scanblock(int32 depth, byte *b, int64 n)
+{
+	int32 off;
+	void *obj;
+	uintptr size;
+	uint32 *ref;
+	void **vp;
+	int64 i;
+
+	if(Debug)
+		printf("%d scanblock %p %D\n", depth, b, n);
+	off = (uint32)(uintptr)b & 7;
+	if(off) {
+		b += 8 - off;
+		n -= 8 - off;
+	}
+
+	vp = (void**)b;
+	n /= 8;
+	for(i=0; i<n; i++) {
+		if(mlookup(vp[i], &obj, &size, &ref)) {
+			if(*ref == RefFree || *ref == RefStack)
+				continue;
+			if(*ref == RefNone) {
+				if(Debug)
+					printf("%d found at %p: ", depth, &vp[i]);
+				*ref = RefSome;
+				scanblock(depth+1, obj, size);
+			}
+		}
+	}
+}
+
+static void
+scanstack(G *g)
+{
+	Stktop *stk;
+	byte *sp;
+
+	sp = g->sched.SP;
+	stk = (Stktop*)g->stackbase;
+	while(stk) {
+		scanblock(0, sp, (byte*)stk - sp);
+		sp = stk->oldsp;
+		stk = (Stktop*)stk->oldbase;
+	}
+}
+
+static void
+mark(void)
+{
+	G *gp;
+
+	// mark data+bss
+	scanblock(0, etext, end - etext);
+
+	// mark stacks
+	for(gp=allg; gp!=nil; gp=gp->alllink) {
+		switch(gp->status){
+		default:
+			printf("unexpected G.status %d\n", gp->status);
+			throw("mark - bad status");
+		case Gdead:
+			break;
+		case Grunning:
+			if(gp != g)
+				throw("mark - world not stopped");
+			scanstack(gp);
+			break;
+		case Grunnable:
+		case Gsyscall:
+		case Gwaiting:
+			scanstack(gp);
+			break;
+		}
+	}
+}
+
+static void
+sweepspan(MSpan *s)
+{
+	int32 i, n, npages, size;
+	byte *p;
+
+	if(s->state != MSpanInUse)
+		return;
+
+	p = (byte*)(s->start << PageShift);
+	if(s->sizeclass == 0) {
+		// Large block.
+		switch(s->gcref0) {
+		default:
+			throw("bad 'ref count'");
+		case RefFree:
+		case RefManual:
+		case RefStack:
+			break;
+		case RefNone:
+			if(Debug)
+				printf("free %D at %p\n", s->npages<<PageShift, p);
+			free(p);
+			break;
+		case RefSome:
+			s->gcref0 = RefNone;	// set up for next mark phase
+			break;
+		}
+		return;
+	}
+
+	// Chunk full of small blocks.
+	// Must match computation in MCentral_Grow.
+	size = class_to_size[s->sizeclass];
+	npages = class_to_allocnpages[s->sizeclass];
+	n = (npages << PageShift) / (size + RefcountOverhead);
+	for(i=0; i<n; i++) {
+		switch(s->gcref[i]) {
+		default:
+			throw("bad 'ref count'");
+		case RefFree:
+		case RefManual:
+		case RefStack:
+			break;
+		case RefNone:
+			if(Debug)
+				printf("free %d at %p\n", size, p+i*size);
+			free(p + i*size);
+			break;
+		case RefSome:
+			s->gcref[i] = RefNone;	// set up for next mark phase
+			break;
+		}
+	}
+}
+
+static void
+sweepspanlist(MSpan *list)
+{
+	MSpan *s, *next;
+
+	for(s=list->next; s != list; s=next) {
+		next = s->next;	// in case s gets moved
+		sweepspan(s);
+	}
+}
+
+static void
+sweep(void)
+{
+	int32 i;
+
+	// Sweep all the spans.
+
+	for(i=0; i<nelem(mheap.central); i++) {
+		// Sweep nonempty (has some free blocks available)
+		// before sweeping empty (is completely allocated),
+		// because finding something to free in a span from empty
+		// will move it into nonempty, and we must not sweep
+		// the same span twice.
+		sweepspanlist(&mheap.central[i].nonempty);
+		sweepspanlist(&mheap.central[i].empty);
+	}
+}
+
+// Semaphore, not Lock, so that the goroutine
+// reschedules when there is contention rather
+// than spinning.
+static uint32 gcsema = 1;
+
+// Initialized from $GOGC.  GOGC=off means no gc.
+//
+// Next gc is after we've allocated an extra amount of
+// memory proportional to the amount already in use.
+// If gcpercent=100 and we're using 4M, we'll gc again
+// when we get to 8M.  This keeps the gc cost in linear
+// proportion to the allocation cost.  Adjusting gcpercent
+// just changes the linear constant (and also the amount of
+// extra memory used).
+static int32 gcpercent = -2;
+
+void
+gc(int32 force)
+{
+	byte *p;
+
+	// The gc is turned off (via enablegc) until
+	// the bootstrap has completed.
+	// Also, malloc gets called in the guts
+	// of a number of libraries that might be
+	// holding locks.  To avoid priority inversion
+	// problems, don't bother trying to run gc
+	// while holding a lock.  The next mallocgc
+	// without a lock will do the gc instead.
+	if(!mstats.enablegc || m->locks > 0 || panicking)
+		return;
+
+	if(gcpercent == -2) {	// first time through
+		p = getenv("GOGC");
+		if(p == nil || p[0] == '\0')
+			gcpercent = 100;
+		else if(strcmp(p, (byte*)"off") == 0)
+			gcpercent = -1;
+		else
+			gcpercent = atoi(p);
+	}
+	if(gcpercent < 0)
+		return;
+
+	semacquire(&gcsema);
+	gosave(&g->sched);	// update g's stack pointer for scanstack
+	stoptheworld();
+	if(mheap.Lock.key != 0)
+		throw("mheap locked during gc");
+	if(force || mstats.inuse_pages >= mstats.next_gc) {
+		mark();
+		sweep();
+		mstats.next_gc = mstats.inuse_pages+mstats.inuse_pages*gcpercent/100;
+	}
+	starttheworld();
+	gosave(&g->sched);	// update g's stack pointer for debugging
+	semrelease(&gcsema);
+}

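The trigger arithmetic at the end of gc() can be made concrete. A standalone C sketch of the threshold growth (all values are illustrative assumptions; the real counters live in mstats and are page counts that mallocgc compares against next_gc):

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t inuse_pages = 1024;	/* assume 4M of 4K pages in use after sweep */
		int32_t gcpercent = 100;	/* default when $GOGC is unset */
		uint64_t next_gc;

		next_gc = inuse_pages + inuse_pages*gcpercent/100;
		printf("next gc at %llu pages (8M)\n", (unsigned long long)next_gc);

		gcpercent = 50;			/* GOGC=50: collect more eagerly */
		next_gc = inuse_pages + inuse_pages*gcpercent/100;
		printf("next gc at %llu pages (6M)\n", (unsigned long long)next_gc);
		return 0;
	}

This matches the worked case in the comment above: at gcpercent=100 with 4M in use, the next collection lands at 8M, keeping gc cost proportional to allocation cost.
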
@@ -47,6 +47,8 @@ MHeap_Alloc(MHeap *h, uintptr npage, int32 sizeclass)
 
 	lock(h);
 	s = MHeap_AllocLocked(h, npage, sizeclass);
+	if(s != nil)
+		mstats.inuse_pages += npage;
 	unlock(h);
 	return s;
 }

@@ -108,6 +110,11 @@ HaveSpan:
 		for(n=0; n<npage; n++)
 			if(MHeapMapCache_GET(&h->mapcache, s->start+n, tmp) != 0)
 				MHeapMapCache_SET(&h->mapcache, s->start+n, 0);
+
+		// Need a list of large allocated spans.
+		// They have sizeclass == 0, so use heap.central[0].empty,
+		// since central[0] is otherwise unused.
+		MSpanList_Insert(&h->central[0].empty, s);
 	} else {
 		// Save cache entries for this span.
 		// If there's a size class, there aren't that many pages.

@@ -191,17 +198,38 @@ MHeap_Grow(MHeap *h, uintptr npage)
 }
 
 // Look up the span at the given page number.
+// Page number is guaranteed to be in map
+// and is guaranteed to be start or end of span.
 MSpan*
 MHeap_Lookup(MHeap *h, PageID p)
 {
 	return MHeapMap_Get(&h->map, p);
 }
 
+// Look up the span at the given page number.
+// Page number is *not* guaranteed to be in map
+// and may be anywhere in the span.
+// Map entries for the middle of a span are only
+// valid for allocated spans.  Free spans may have
+// other garbage in their middles, so we have to
+// check for that.
+MSpan*
+MHeap_LookupMaybe(MHeap *h, PageID p)
+{
+	MSpan *s;
+
+	s = MHeapMap_GetMaybe(&h->map, p);
+	if(s == nil || p < s->start || p - s->start >= s->npages)
+		return nil;
+	return s;
+}
+
 // Free the span back into the heap.
 void
 MHeap_Free(MHeap *h, MSpan *s)
 {
 	lock(h);
+	mstats.inuse_pages -= s->npages;
 	MHeap_FreeLocked(h, s);
 	unlock(h);
 }

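MHeap_LookupMaybe's bounds test is what makes interior and junk pointers safe: a stale map entry can hand back a span that no longer covers the probed page. The check in isolation, with an assumed span of 8 pages starting at page 100:

	/* The interior-pointer validation from MHeap_LookupMaybe, isolated.
	   The span extent here is an assumed example. */
	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uintptr_t start = 100, npages = 8, p;

		for(p = 96; p <= 108; p += 4) {
			int ok = !(p < start || p - start >= npages);
			printf("page %3lu: %s\n", (unsigned long)p,
				ok ? "inside span" : "rejected");
		}
		return 0;
	}

Pages 100 and 104 pass; 96 and 108 are rejected, so the scanner never chases garbage through a stale entry.
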
@@ -266,6 +294,31 @@ MHeapMap_Get(MHeapMap *m, PageID k)
 	return m->p[i1]->p[i2]->s[i3];
 }
 
+MSpan*
+MHeapMap_GetMaybe(MHeapMap *m, PageID k)
+{
+	int32 i1, i2, i3;
+	MHeapMapNode2 *p2;
+	MHeapMapNode3 *p3;
+
+	i3 = k & MHeapMap_Level3Mask;
+	k >>= MHeapMap_Level3Bits;
+	i2 = k & MHeapMap_Level2Mask;
+	k >>= MHeapMap_Level2Bits;
+	i1 = k & MHeapMap_Level1Mask;
+	k >>= MHeapMap_Level1Bits;
+	if(k != 0)
+		throw("MHeapMap_Get");
+
+	p2 = m->p[i1];
+	if(p2 == nil)
+		return nil;
+	p3 = p2->p[i2];
+	if(p3 == nil)
+		return nil;
+	return p3->s[i3];
+}
+
 void
 MHeapMap_Set(MHeapMap *m, PageID k, MSpan *s)
 {

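MHeapMap_GetMaybe differs from MHeapMap_Get only in nil-checking the interior nodes, so a probe for a page that was never mapped returns nil instead of faulting. The radix decomposition, sketched with assumed level widths (the real MHeapMap_Level*Bits values are defined in malloc.h and are not shown in this diff):

	/* Three-level radix split of a page number, as in MHeapMap_GetMaybe.
	   The 10/10/12 widths are an assumption for illustration. */
	#include <stdio.h>
	#include <stdint.h>

	enum { Level3Bits = 10, Level2Bits = 10, Level1Bits = 12 };

	int
	main(void)
	{
		uintptr_t k = 0x12345;	/* some page number */
		uintptr_t i3 = k & ((1<<Level3Bits)-1);  k >>= Level3Bits;
		uintptr_t i2 = k & ((1<<Level2Bits)-1);  k >>= Level2Bits;
		uintptr_t i1 = k & ((1<<Level1Bits)-1);  k >>= Level1Bits;

		/* leftover bits mean the page is out of range (Get throws) */
		printf("i1=%lu i2=%lu i3=%lu leftover=%lu\n",
			(unsigned long)i1, (unsigned long)i2,
			(unsigned long)i3, (unsigned long)k);
		return 0;
	}
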
@@ -57,7 +57,7 @@ SizeToClass(int32 size)
 void
 InitSizes(void)
 {
-	int32 align, sizeclass, size, nextsize, n;
+	int32 align, sizeclass, size, osize, nextsize, n;
 	uint32 i;
 	uintptr allocsize, npages;
 

@@ -81,7 +81,8 @@ InitSizes(void)
 		// the leftover is less than 1/8 of the total,
 		// so wasted space is at most 12.5%.
 		allocsize = PageSize;
-		while(allocsize%size > (PageSize/8))
+		osize = size + RefcountOverhead;
+		while(allocsize%osize > (PageSize/8))
 			allocsize += PageSize;
 		npages = allocsize >> PageShift;
 

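With the refcount word folded in, the 12.5%-waste argument now runs over osize = size + RefcountOverhead rather than size alone. For an assumed 48-byte class: osize = 52, and one 4096-byte page leaves 4096 % 52 = 40 bytes over, well under PageSize/8 = 512, so a single page suffices and carves into 78 objects with 40 bytes of slack. The next hunk applies the same osize correction to the duplicate-class test.
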
@@ -92,7 +93,7 @@ InitSizes(void)
 		// different sizes.
 		if(sizeclass > 1
 		&& npages == class_to_allocnpages[sizeclass-1]
-		&& allocsize/size == allocsize/class_to_size[sizeclass-1]) {
+		&& allocsize/osize == allocsize/(class_to_size[sizeclass-1]+RefcountOverhead)) {
 			class_to_size[sizeclass-1] = size;
 			continue;
 		}

@@ -3,7 +3,7 @@
 // license that can be found in the LICENSE file.
 
 #include "runtime.h"
-#include "malloc.h"	/* so that acid generated from proc.c includes malloc data structures */
+#include "malloc.h"
 
 typedef struct Sched Sched;
 

@@ -118,6 +118,7 @@ initdone(void)
 {
 	// Let's go.
 	sched.predawn = 0;
+	mstats.enablegc = 1;
 
 	// If main·init_function started other goroutines,
 	// kick off new ms to handle them, like ready

@@ -146,7 +147,7 @@ malg(int32 stacksize)
 	byte *stk;
 
 	// 160 is the slop amount known to the stack growth code
-	g = mal(sizeof(G));
+	g = malloc(sizeof(G));
 	stk = stackalloc(160 + stacksize);
 	g->stack0 = stk;
 	g->stackguard = stk + 160;

@@ -444,7 +445,7 @@ matchmg(void)
 		m->nextg = g;
 		notewakeup(&m->havenextg);
 	}else{
-		m = mal(sizeof(M));
+		m = malloc(sizeof(M));
 		m->g0 = malg(8192);
 		m->nextg = g;
 		m->id = sched.mcount++;

@@ -525,6 +526,8 @@ scheduler(void)
 void
 sys·Gosched(void)
 {
+	if(g == m->g0)
+		throw("gosched of g0");
 	if(gosave(&g->sched) == 0){
 		g = m->g0;
 		gogo(&m->sched);

@@ -130,6 +130,7 @@ sighandler(int32 sig, struct siginfo *info, void *context)
 {
 	if(panicking)	// traceback already printed
 		sys_Exit(2);
+	panicking = 1;
 
 	_STRUCT_MCONTEXT64 *uc_mcontext = get_uc_mcontext(context);
 	_STRUCT_X86_THREAD_STATE64 *ss = get___ss(uc_mcontext);

@@ -282,11 +283,13 @@ lock(Lock *l)
 
 	if(xadd(&l->key, 1) > 1)	// someone else has it; wait
 		mach_semacquire(l->sema);
+	m->locks++;
 }
 
 void
 unlock(Lock *l)
 {
+	m->locks--;
 	if(xadd(&l->key, -1) > 0)	// someone else is waiting
 		mach_semrelease(l->sema);
 }

@@ -306,6 +306,8 @@ lock(Lock *l)
 {
 	uint32 v;
 
+	m->locks++;
+
 again:
 	v = l->key;
 	if((v&1) == 0){

@@ -349,6 +351,8 @@ unlock(Lock *l)
 {
 	uint32 v;
 
+	m->locks--;
+
 	// Atomically get value and clear lock bit.
 again:
 	v = l->key;

@@ -147,21 +147,25 @@ args(int32 c, uint8 **v)
 void
 goargs(void)
 {
-	string* goargv;
-	string* envv;
+	string *gargv;
+	string *genvv;
 	int32 i, envc;
 
-	goargv = (string*)argv;
-	for (i=0; i<argc; i++)
-		goargv[i] = gostring(argv[i]);
-	sys·Args.array = (byte*)argv;
+	for(envc=0; argv[argc+1+envc] != 0; envc++)
+		;
+
+	gargv = malloc(argc*sizeof gargv[0]);
+	genvv = malloc(envc*sizeof genvv[0]);
+
+	for(i=0; i<argc; i++)
+		gargv[i] = gostring(argv[i]);
+	sys·Args.array = (byte*)gargv;
 	sys·Args.nel = argc;
 	sys·Args.cap = argc;
 
-	envv = goargv + argc + 1;	// skip 0 at end of argv
-	for (envc = 0; envv[envc] != 0; envc++)
-		envv[envc] = gostring((uint8*)envv[envc]);
-	sys·Envs.array = (byte*)envv;
+	for(i=0; i<envc; i++)
+		genvv[i] = gostring(argv[argc+1+i]);
+	sys·Envs.array = (byte*)genvv;
 	sys·Envs.nel = envc;
 	sys·Envs.cap = envc;
 }

@@ -162,6 +162,7 @@ struct M
 	int32 siz2;
 	int32 id;
 	int32 mallocing;
+	int32 locks;
 	Note havenextg;
 	G* nextg;
 	M* schedlink;

@@ -304,6 +305,9 @@ bool ifaceeq(Iface, Iface);
 uint64 ifacehash(Iface);
 uint64 nohash(uint32, void*);
 uint32 noequal(uint32, void*, void*);
+void* malloc(uintptr size);
+void* mallocgc(uintptr size);
+void free(void *v);
 
 #pragma varargck argpos printf 1
 

test/gc.go (new file, 25 lines)
@@ -0,0 +1,25 @@
+// $G $F.go && $L $F.$A && ./$A.out
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "malloc"
+
+func mk2() {
+	b := new([10000]byte);
+//	println(b, "stored at", &b);
+}
+
+func mk1() {
+	mk2();
+}
+
+func main() {
+	for i := 0; i < 10; i++ {
+		mk1();
+		malloc.GC();
+	}
+}

test/gc1.go (new file, 13 lines)
@@ -0,0 +1,13 @@
+// $G $F.go && $L $F.$A && ./$A.out
+
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+	for i := 0; i < 1000000; i++ {
+		x := new([100]byte);
+	}
+}