
runtime: record type information for hashtable internal structures.

Remove all hashtable-specific GC code.

Fixes #6119.

R=cshapiro, dvyukov, khr
CC=golang-dev
https://golang.org/cl/13078044
Keith Randall 2013-08-31 09:09:50 -07:00
parent d0206101c8
commit fb376021be
9 changed files with 304 additions and 403 deletions
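The change in one sentence: instead of the collector walking map internals with a dedicated iterator (hash_gciter, deleted below), the compiler now builds ordinary Type descriptors for a map's bucket and header objects, and the runtime tags those allocations with that type info so the generic scan code handles them. A rough Go sketch of the two internal layouts involved, with assumed names and amd64 sizes (a reader's illustration, not code from this CL):

package sketch

import "unsafe"

const bucketCnt = 8 // BUCKETSIZE in runtime/hashmap.c

// bmap approximates one hash bucket (the runtime calls it Bucket).
type bmap struct {
	tophash  [bucketCnt]uint8 // top 8 bits of each entry's hash; 0 = empty
	overflow *bmap            // overflow chain; now described to the GC
	// ...followed by bucketCnt keys, then bucketCnt values
}

// hmapHdr approximates the map header (the runtime calls it Hmap).
type hmapHdr struct {
	count      int    // # live entries; must stay first, used by len()
	flags      uint32
	hash0      uint32 // hash seed
	B          uint8  // log2 of number of buckets
	keysize    uint8
	valuesize  uint8
	bucketsize uint16
	buckets    unsafe.Pointer // bucket array, scanned via the bucket type
	oldbuckets unsafe.Pointer // previous array while growing
	nevacuate  uintptr        // evacuation progress counter
}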

View File

@@ -186,6 +186,10 @@ struct Type
// TARRAY
vlong bound; // negative is dynamic array
// TMAP
Type* bucket; // internal type representing a hash bucket
Type* hmap; // internal type representing a Hmap (map header object)
int32 maplineno; // first use of TFORW as map key
int32 embedlineno; // first use of TFORW as embedded type

View File

@@ -101,6 +101,135 @@ lsort(Sig *l, int(*f)(Sig*, Sig*))
return l;
}
// Builds a type representing a Bucket structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../pkg/runtime/hashmap.c!
enum {
BUCKETSIZE = 8,
MAXKEYSIZE = 128,
MAXVALSIZE = 128,
};
static Type*
mapbucket(Type *t)
{
Type *keytype, *valtype;
Type *bucket;
Type *overflowfield, *keysfield, *valuesfield;
int32 offset;
if(t->bucket != T)
return t->bucket;
keytype = t->down;
valtype = t->type;
if(keytype->width > MAXKEYSIZE)
keytype = ptrto(keytype);
if(valtype->width > MAXVALSIZE)
valtype = ptrto(valtype);
bucket = typ(TSTRUCT);
// The first field is: uint8 topbits[BUCKETSIZE].
// We don't need to encode it as GC doesn't care about it.
offset = BUCKETSIZE * 1;
overflowfield = typ(TFIELD);
overflowfield->type = ptrto(bucket);
overflowfield->width = offset; // "width" is offset in structure
overflowfield->sym = mal(sizeof(Sym)); // not important but needs to be set to give this type a name
overflowfield->sym->name = "overflow";
offset += widthptr;
keysfield = typ(TFIELD);
keysfield->type = typ(TARRAY);
keysfield->type->type = keytype;
keysfield->type->bound = BUCKETSIZE;
keysfield->type->width = BUCKETSIZE * keytype->width;
keysfield->width = offset;
keysfield->sym = mal(sizeof(Sym));
keysfield->sym->name = "keys";
offset += BUCKETSIZE * keytype->width;
valuesfield = typ(TFIELD);
valuesfield->type = typ(TARRAY);
valuesfield->type->type = valtype;
valuesfield->type->bound = BUCKETSIZE;
valuesfield->type->width = BUCKETSIZE * valtype->width;
valuesfield->width = offset;
valuesfield->sym = mal(sizeof(Sym));
valuesfield->sym->name = "values";
offset += BUCKETSIZE * valtype->width;
// link up fields
bucket->type = overflowfield;
overflowfield->down = keysfield;
keysfield->down = valuesfield;
valuesfield->down = T;
bucket->width = offset;
bucket->local = t->local;
t->bucket = bucket;
return bucket;
}
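Worked through for a concrete type, the arithmetic above gives the following offsets; a minimal sketch assuming map[int64]string on amd64 (widthptr = 8, string header = 16 bytes; the widths are assumptions for illustration):

package main

import "fmt"

// Replays mapbucket's offset computation for map[int64]string on amd64.
func main() {
	const (
		bucketsize = 8  // BUCKETSIZE
		ptrsize    = 8  // widthptr
		keywidth   = 8  // int64
		valwidth   = 16 // string header
	)
	offset := bucketsize * 1            // tophash bytes, not encoded for GC
	fmt.Println("overflow at", offset)  // 8
	offset += ptrsize
	fmt.Println("keys at", offset)      // 16
	offset += bucketsize * keywidth
	fmt.Println("values at", offset)    // 80
	offset += bucketsize * valwidth
	fmt.Println("bucket width", offset) // 208
}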
// Builds a type representing a Hmap structure for
// the given map type. This type is not visible to users -
// we include only enough information to generate a correct GC
// program for it.
// Make sure this stays in sync with ../../pkg/runtime/hashmap.c!
static Type*
hmap(Type *t)
{
Type *h, *bucket;
Type *bucketsfield, *oldbucketsfield;
int32 offset;
if(t->hmap != T)
return t->hmap;
bucket = mapbucket(t);
h = typ(TSTRUCT);
offset = widthint; // count
offset += 4; // flags
offset += 4; // hash0
offset += 1; // B
offset += 1; // keysize
offset += 1; // valuesize
offset = (offset + 1) / 2 * 2;
offset += 2; // bucketsize
offset = (offset + widthptr - 1) / widthptr * widthptr;
bucketsfield = typ(TFIELD);
bucketsfield->type = ptrto(bucket);
bucketsfield->width = offset;
bucketsfield->sym = mal(sizeof(Sym));
bucketsfield->sym->name = "buckets";
offset += widthptr;
oldbucketsfield = typ(TFIELD);
oldbucketsfield->type = ptrto(bucket);
oldbucketsfield->width = offset;
oldbucketsfield->sym = mal(sizeof(Sym));
oldbucketsfield->sym->name = "oldbuckets";
offset += widthptr;
offset += widthptr; // nevacuate (last field in Hmap)
// link up fields
h->type = bucketsfield;
bucketsfield->down = oldbucketsfield;
oldbucketsfield->down = T;
h->width = offset;
h->local = t->local;
t->hmap = h;
return h;
}
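The repeated (offset + a - 1) / a * a expressions are the usual align-up idiom. A small sketch replaying the header layout on amd64 (widthint = widthptr = 8, an assumption):

package main

import "fmt"

func alignUp(off, a int) int { return (off + a - 1) / a * a }

func main() {
	off := 8              // count (widthint)
	off += 4 + 4          // flags, hash0 -> 16
	off += 1 + 1 + 1      // B, keysize, valuesize -> 19
	off = alignUp(off, 2) // -> 20
	off += 2              // bucketsize -> 22
	off = alignUp(off, 8) // -> 24, offset of buckets
	fmt.Println("buckets at", off)      // 24
	fmt.Println("oldbuckets at", off+8) // 32
	fmt.Println("nevacuate at", off+16) // 40
	fmt.Println("Hmap width", off+24)   // 48
}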
/*
* f is method type, with receiver.
* return function type, receiver as first argument (or not).
@@ -715,7 +844,7 @@ static Sym*
dtypesym(Type *t)
{
int ot, xt, n, isddd, dupok;
Sym *s, *s1, *s2, *slink;
Sym *s, *s1, *s2, *s3, *s4, *slink;
Sig *a, *m;
Type *t1, *tbase, *t2;
@@ -855,10 +984,14 @@ ok:
// ../../pkg/runtime/type.go:/MapType
s1 = dtypesym(t->down);
s2 = dtypesym(t->type);
s3 = dtypesym(mapbucket(t));
s4 = dtypesym(hmap(t));
ot = dcommontype(s, ot, t);
xt = ot - 2*widthptr;
ot = dsymptr(s, ot, s1, 0);
ot = dsymptr(s, ot, s2, 0);
ot = dsymptr(s, ot, s3, 0);
ot = dsymptr(s, ot, s4, 0);
break;
case TPTR32:
@@ -1118,9 +1251,9 @@ dgcsym1(Sym *s, int ot, Type *t, vlong *off, int stack_size)
// NOTE: Any changes here need to be made to reflect.MapOf as well.
if(*off % widthptr != 0)
fatal("dgcsym1: invalid alignment, %T", t);
ot = duintptr(s, ot, GC_MAP_PTR);
ot = duintptr(s, ot, GC_PTR);
ot = duintptr(s, ot, *off);
ot = dsymptr(s, ot, dtypesym(t), 0);
ot = dsymptr(s, ot, dgcsym(hmap(t)), 0);
*off += t->width;
break;

View File

@@ -313,9 +313,11 @@ type interfaceType struct {
// mapType represents a map type.
type mapType struct {
rtype `reflect:"map"`
key *rtype // map key type
elem *rtype // map element (value) type
rtype `reflect:"map"`
key *rtype // map key type
elem *rtype // map element (value) type
bucket *rtype // internal bucket structure
hmap *rtype // internal map header
}
// ptrType represents a pointer type.
@@ -354,7 +356,6 @@ const (
_GC_ARRAY_START
_GC_ARRAY_NEXT
_GC_CALL
_GC_MAP_PTR
_GC_CHAN_PTR
_GC_STRING
_GC_EFACE
@@ -1400,11 +1401,11 @@ func cachePut(k cacheKey, t *rtype) Type {
return t
}
// garbage collection bytecode program for chan or map.
// garbage collection bytecode program for chan.
// See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
type chanMapGC struct {
type chanGC struct {
width uintptr // sizeof(map)
op uintptr // _GC_MAP_PTR or _GC_CHAN_PTR
op uintptr // _GC_CHAN_PTR
off uintptr // 0
typ *rtype // map type
end uintptr // _GC_END
@@ -1467,7 +1468,7 @@ func ChanOf(dir ChanDir, t Type) Type {
ch.uncommonType = nil
ch.ptrToThis = nil
ch.gc = unsafe.Pointer(&chanMapGC{
ch.gc = unsafe.Pointer(&chanGC{
width: ch.size,
op: _GC_CHAN_PTR,
off: 0,
@@ -1521,17 +1522,11 @@ func MapOf(key, elem Type) Type {
mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
mt.key = ktyp
mt.elem = etyp
mt.bucket = bucketOf(ktyp, etyp)
mt.hmap = hMapOf(mt.bucket)
mt.uncommonType = nil
mt.ptrToThis = nil
mt.gc = unsafe.Pointer(&chanMapGC{
width: mt.size,
op: _GC_MAP_PTR,
off: 0,
typ: &mt.rtype,
end: _GC_END,
})
// INCORRECT. Uncomment to check that TestMapOfGC and TestMapOfGCValues
// fail when mt.gc is wrong.
//mt.gc = unsafe.Pointer(&badGC{width: mt.size, end: _GC_END})
@@ -1539,6 +1534,117 @@ func MapOf(key, elem Type) Type {
return cachePut(ckey, &mt.rtype)
}
// Make sure these routines stay in sync with ../../pkg/runtime/hashmap.c!
// These types exist only for GC, so we only fill out GC relevant info.
// Currently, that's just size and the GC program. We also fill in string
// for possible debugging use.
const (
BUCKETSIZE = 8
MAXKEYSIZE = 128
MAXVALSIZE = 128
)
func bucketOf(ktyp, etyp *rtype) *rtype {
if ktyp.size > MAXKEYSIZE {
ktyp = PtrTo(ktyp).(*rtype)
}
if etyp.size > MAXVALSIZE {
etyp = PtrTo(etyp).(*rtype)
}
ptrsize := unsafe.Sizeof(uintptr(0))
gc := make([]uintptr, 1) // first entry is size, filled in at the end
offset := BUCKETSIZE * unsafe.Sizeof(uint8(0)) // topbits
gc = append(gc, _GC_PTR, offset, 0 /*self pointer set below*/) // overflow
offset += ptrsize
// keys
if ktyp.kind&kindNoPointers == 0 {
gc = append(gc, _GC_ARRAY_START, offset, BUCKETSIZE, ktyp.size)
gc = appendGCProgram(gc, ktyp)
gc = append(gc, _GC_ARRAY_NEXT)
}
offset += BUCKETSIZE * ktyp.size
// values
if etyp.kind&kindNoPointers == 0 {
gc = append(gc, _GC_ARRAY_START, offset, BUCKETSIZE, etyp.size)
gc = appendGCProgram(gc, etyp)
gc = append(gc, _GC_ARRAY_NEXT)
}
offset += BUCKETSIZE * etyp.size
gc = append(gc, _GC_END)
gc[0] = offset
gc[3] = uintptr(unsafe.Pointer(&gc[0])) // set self pointer
b := new(rtype)
b.size = offset
b.gc = unsafe.Pointer(&gc[0])
s := "bucket(" + *ktyp.string + "," + *etyp.string + ")"
b.string = &s
return b
}
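The slice built here is a flat word program. Its shape, sketched as comments (word values illustrative; each array block is emitted only when the corresponding type actually has pointers):

// Approximate word sequence bucketOf produces:
//   gc[0]                             total bucket size, filled in last
//   _GC_PTR, 8, &gc[0]                overflow pointer; program points at itself
//   _GC_ARRAY_START, 16, 8, keysize   keys array (only if key has pointers)
//     ...key's GC program, minus its trailing _GC_END...
//   _GC_ARRAY_NEXT
//   _GC_ARRAY_START, off, 8, valsize  values array (only if value has pointers)
//     ...value's GC program, minus its trailing _GC_END...
//   _GC_ARRAY_NEXT
//   _GC_END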
// Take the GC program for "t" and append it to the GC program "gc".
func appendGCProgram(gc []uintptr, t *rtype) []uintptr {
p := t.gc
p = unsafe.Pointer(uintptr(p) + unsafe.Sizeof(uintptr(0))) // skip size
loop:
for {
var argcnt int
switch *(*uintptr)(p) {
case _GC_END:
// Note: _GC_END not included in append
break loop
case _GC_ARRAY_NEXT:
argcnt = 0
case _GC_APTR, _GC_STRING, _GC_EFACE, _GC_IFACE:
argcnt = 1
case _GC_PTR, _GC_CALL, _GC_CHAN_PTR, _GC_SLICE:
argcnt = 2
case _GC_ARRAY_START, _GC_REGION:
argcnt = 3
default:
panic("unknown GC program op for " + *t.string + ": " + strconv.FormatUint(*(*uint64)(p), 10))
}
for i := 0; i < argcnt+1; i++ {
gc = append(gc, *(*uintptr)(p))
p = unsafe.Pointer(uintptr(p) + unsafe.Sizeof(uintptr(0)))
}
}
return gc
}
func hMapOf(bucket *rtype) *rtype {
ptrsize := unsafe.Sizeof(uintptr(0))
// make gc program & compute hmap size
gc := make([]uintptr, 1) // first entry is size, filled in at the end
offset := unsafe.Sizeof(uint(0)) // count
offset += unsafe.Sizeof(uint32(0)) // flags
offset += unsafe.Sizeof(uint32(0)) // hash0
offset += unsafe.Sizeof(uint8(0)) // B
offset += unsafe.Sizeof(uint8(0)) // keysize
offset += unsafe.Sizeof(uint8(0)) // valuesize
offset = (offset + 1) / 2 * 2
offset += unsafe.Sizeof(uint16(0)) // bucketsize
offset = (offset + ptrsize - 1) / ptrsize * ptrsize
gc = append(gc, _GC_PTR, offset, uintptr(bucket.gc)) // buckets
offset += ptrsize
gc = append(gc, _GC_PTR, offset, uintptr(bucket.gc)) // oldbuckets
offset += ptrsize
offset += ptrsize // nevacuate
gc = append(gc, _GC_END)
gc[0] = offset
h := new(rtype)
h.size = offset
h.gc = unsafe.Pointer(&gc[0])
s := "hmap(" + *bucket.string + ")"
h.string = &s
return h
}
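With bucketOf and hMapOf in place, maps built through reflect get precise GC programs just like compiler-created ones. Typical use of the public entry point (standard reflect API):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// MapOf now also constructs the internal bucket and hmap types
	// that describe the map's memory to the garbage collector.
	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
	m := reflect.MakeMap(mt)
	m.SetMapIndex(reflect.ValueOf("answer"), reflect.ValueOf(42))
	fmt.Println(m.Interface()) // map[answer:42]
}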
// garbage collection bytecode program for slice of non-zero-length values.
// See ../../cmd/gc/reflect.c:/^dgcsym1 and :/^dgcsym.
type sliceGC struct {

View File

@@ -75,6 +75,8 @@
typedef struct Bucket Bucket;
struct Bucket
{
// Note: the format of the Bucket is encoded in ../../cmd/gc/reflect.c and
// ../reflect/type.go. Don't change this structure without also changing that code!
uint8 tophash[BUCKETSIZE]; // top 8 bits of hash of each entry (0 = empty)
Bucket *overflow; // overflow bucket, if any
byte data[1]; // BUCKETSIZE keys followed by BUCKETSIZE values
@@ -90,14 +92,13 @@ struct Bucket
#define evacuated(b) (((uintptr)(b)->overflow & 1) != 0)
#define overflowptr(b) ((Bucket*)((uintptr)(b)->overflow & ~(uintptr)1))
// Initialize bucket to the empty state. This only works if BUCKETSIZE==8!
#define clearbucket(b) { *(uint64*)((b)->tophash) = 0; (b)->overflow = nil; }
struct Hmap
{
// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
// ../reflect/type.go. Don't change this structure without also changing that code!
uintgo count; // # live cells == size of map. Must be first (used by len() builtin)
uint32 flags;
uint32 hash0; // hash seed
uint32 hash0; // hash seed
uint8 B; // log_2 of # of buckets (can hold up to LOAD * 2^B items)
uint8 keysize; // key size in bytes
uint8 valuesize; // value size in bytes
@@ -115,8 +116,6 @@ enum
IndirectValue = 2, // storing pointers to values
Iterator = 4, // there may be an iterator using buckets
OldIterator = 8, // there may be an iterator using oldbuckets
CanFreeBucket = 16, // ok to free buckets
CanFreeKey = 32, // keys are indirect and ok to free keys
};
// Macros for dereferencing indirect keys
@@ -209,17 +208,15 @@ hash_init(MapType *t, Hmap *h, uint32 hint)
{
uint8 B;
byte *buckets;
uintptr i;
uintptr keysize, valuesize, bucketsize;
uint8 flags;
Bucket *b;
flags = CanFreeBucket;
flags = 0;
// figure out how big we have to make everything
keysize = t->key->size;
if(keysize > MAXKEYSIZE) {
flags |= IndirectKey | CanFreeKey;
flags |= IndirectKey;
keysize = sizeof(byte*);
}
valuesize = t->elem->size;
@@ -241,8 +238,6 @@ hash_init(MapType *t, Hmap *h, uint32 hint)
runtime·throw("value size not a multiple of value align");
if(BUCKETSIZE < 8)
runtime·throw("bucketsize too small for proper alignment");
if(BUCKETSIZE != 8)
runtime·throw("must redo clearbucket");
if(sizeof(void*) == 4 && t->key->align > 4)
runtime·throw("need padding in bucket (key)");
if(sizeof(void*) == 4 && t->elem->align > 4)
@@ -260,16 +255,10 @@ hash_init(MapType *t, Hmap *h, uint32 hint)
// done lazily later.
buckets = nil;
} else {
buckets = runtime·mallocgc(bucketsize << B, 0, FlagNoZero);
for(i = 0; i < (uintptr)1 << B; i++) {
b = (Bucket*)(buckets + i * bucketsize);
clearbucket(b);
}
buckets = runtime·mallocgc(bucketsize << B, (uintptr)t->bucket | TypeInfo_Array, 0);
}
// initialize Hmap
// Note: we save all these stores to the end so gciter doesn't see
// a partially initialized map.
h->count = 0;
h->B = B;
h->flags = flags;
@@ -300,19 +289,16 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
uintptr i;
byte *k, *v;
byte *xk, *yk, *xv, *yv;
byte *ob;
b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
newbit = (uintptr)1 << (h->B - 1);
if(!evacuated(b)) {
// TODO: reuse overflow buckets instead of using new ones, if there
// is no iterator using the old buckets. (If CanFreeBuckets and !OldIterator.)
// is no iterator using the old buckets. (If !OldIterator.)
x = (Bucket*)(h->buckets + oldbucket * h->bucketsize);
y = (Bucket*)(h->buckets + (oldbucket + newbit) * h->bucketsize);
clearbucket(x);
clearbucket(y);
xi = 0;
yi = 0;
xk = x->data;
@@ -331,8 +317,7 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
if((hash & newbit) == 0) {
if(xi == BUCKETSIZE) {
if(checkgc) mstats.next_gc = mstats.heap_alloc;
newx = runtime·mallocgc(h->bucketsize, 0, FlagNoZero);
clearbucket(newx);
newx = runtime·mallocgc(h->bucketsize, (uintptr)t->bucket, 0);
x->overflow = newx;
x = newx;
xi = 0;
@@ -356,8 +341,7 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
} else {
if(yi == BUCKETSIZE) {
if(checkgc) mstats.next_gc = mstats.heap_alloc;
newy = runtime·mallocgc(h->bucketsize, 0, FlagNoZero);
clearbucket(newy);
newy = runtime·mallocgc(h->bucketsize, (uintptr)t->bucket, 0);
y->overflow = newy;
y = newy;
yi = 0;
@@ -389,35 +373,18 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
b = nextb;
} while(b != nil);
// Free old overflow buckets as much as we can.
if((h->flags & OldIterator) == 0) {
b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
if((h->flags & CanFreeBucket) != 0) {
while((nextb = overflowptr(b)) != nil) {
b->overflow = nextb->overflow;
runtime·free(nextb);
}
} else {
// can't explicitly free overflow buckets, but at least
// we can unlink them.
b->overflow = (Bucket*)1;
}
}
// Unlink the overflow buckets to help GC.
b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
if((h->flags & OldIterator) == 0)
b->overflow = (Bucket*)1;
}
// advance evacuation mark
if(oldbucket == h->nevacuate) {
h->nevacuate = oldbucket + 1;
if(oldbucket + 1 == newbit) { // newbit == # of oldbuckets
if(oldbucket + 1 == newbit) // newbit == # of oldbuckets
// free main bucket array
if((h->flags & (OldIterator | CanFreeBucket)) == CanFreeBucket) {
ob = h->oldbuckets;
h->oldbuckets = nil;
runtime·free(ob);
} else {
h->oldbuckets = nil;
}
}
h->oldbuckets = nil;
}
if(docheck)
check(t, h);
@@ -452,14 +419,10 @@ hash_grow(MapType *t, Hmap *h)
old_buckets = h->buckets;
// NOTE: this could be a big malloc, but since we don't need zeroing it is probably fast.
if(checkgc) mstats.next_gc = mstats.heap_alloc;
new_buckets = runtime·mallocgc((uintptr)h->bucketsize << (h->B + 1), 0, FlagNoZero);
new_buckets = runtime·mallocgc((uintptr)h->bucketsize << (h->B + 1), (uintptr)t->bucket | TypeInfo_Array, 0);
flags = (h->flags & ~(Iterator | OldIterator));
if((h->flags & Iterator) != 0) {
if((h->flags & Iterator) != 0)
flags |= OldIterator;
// We can't free indirect keys any more, as
// they are potentially aliased across buckets.
flags &= ~CanFreeKey;
}
// commit the grow (atomic wrt gc)
h->B++;
@@ -614,11 +577,8 @@ hash_insert(MapType *t, Hmap *h, void *key, void *value)
check(t, h);
hash = h->hash0;
t->key->alg->hash(&hash, t->key->size, key);
if(h->buckets == nil) {
h->buckets = runtime·mallocgc(h->bucketsize, 0, FlagNoZero);
b = (Bucket*)(h->buckets);
clearbucket(b);
}
if(h->buckets == nil)
h->buckets = runtime·mallocgc(h->bucketsize, (uintptr)t->bucket | TypeInfo_Array, 0);
again:
bucket = hash & (((uintptr)1 << h->B) - 1);
@@ -665,8 +625,7 @@ hash_insert(MapType *t, Hmap *h, void *key, void *value)
if(inserti == nil) {
// all current buckets are full, allocate a new one.
if(checkgc) mstats.next_gc = mstats.heap_alloc;
newb = runtime·mallocgc(h->bucketsize, 0, FlagNoZero);
clearbucket(newb);
newb = runtime·mallocgc(h->bucketsize, (uintptr)t->bucket, 0);
b->overflow = newb;
inserti = newb->tophash;
insertk = newb->data;
@@ -676,13 +635,13 @@ hash_insert(MapType *t, Hmap *h, void *key, void *value)
// store new key/value at insert position
if((h->flags & IndirectKey) != 0) {
if(checkgc) mstats.next_gc = mstats.heap_alloc;
kmem = runtime·mallocgc(t->key->size, 0, FlagNoZero);
kmem = runtime·mallocgc(t->key->size, (uintptr)t->key, 0);
*(byte**)insertk = kmem;
insertk = kmem;
}
if((h->flags & IndirectValue) != 0) {
if(checkgc) mstats.next_gc = mstats.heap_alloc;
vmem = runtime·mallocgc(t->elem->size, 0, FlagNoZero);
vmem = runtime·mallocgc(t->elem->size, (uintptr)t->elem, 0);
*(byte**)insertv = vmem;
insertv = vmem;
}
@@ -726,22 +685,20 @@ hash_remove(MapType *t, Hmap *h, void *key)
if(!eq)
continue;
if((h->flags & CanFreeKey) != 0) {
k = *(byte**)k;
if((h->flags & IndirectKey) != 0) {
*(byte**)k = nil;
} else {
t->key->alg->copy(t->key->size, k, nil);
}
if((h->flags & IndirectValue) != 0) {
v = *(byte**)v;
*(byte**)v = nil;
} else {
t->elem->alg->copy(t->elem->size, v, nil);
}
b->tophash[i] = 0;
h->count--;
if((h->flags & CanFreeKey) != 0) {
runtime·free(k);
}
if((h->flags & IndirectValue) != 0) {
runtime·free(v);
}
// TODO: consolidate buckets if they are mostly empty
// can only consolidate if there are no live iterators at this size.
if(docheck)
@@ -804,7 +761,7 @@ hash_iter_init(MapType *t, Hmap *h, struct hash_iter *it)
it->bptr = nil;
// Remember we have an iterator.
// Can run concurrently with another hash_iter_init() and with reflect·mapiterinit().
// Can run concurrently with another hash_iter_init().
for(;;) {
old = h->flags;
if((old&(Iterator|OldIterator)) == (Iterator|OldIterator))
@@ -944,157 +901,6 @@ next:
goto next;
}
#define PHASE_BUCKETS 0
#define PHASE_OLD_BUCKETS 1
#define PHASE_TABLE 2
#define PHASE_OLD_TABLE 3
#define PHASE_DONE 4
// Initialize the iterator.
// Returns false if Hmap contains no pointers (in which case the iterator is not initialized).
bool
hash_gciter_init (Hmap *h, struct hash_gciter *it)
{
// GC during map initialization or on an empty map.
if(h->buckets == nil)
return false;
it->h = h;
it->phase = PHASE_BUCKETS;
it->bucket = 0;
it->b = nil;
// TODO: finish evacuating oldbuckets so that we can collect
// oldbuckets? We don't want to keep a partially evacuated
// table around forever, so each gc could make at least some
// evacuation progress. Need to be careful about concurrent
// access if we do concurrent gc. Even if not, we don't want
// to make the gc pause any longer than it has to be.
return true;
}
// Returns true and fills *data with internal structure/key/value data,
// or returns false if the iterator has terminated.
// Ugh, this interface is really annoying. I want a callback fn!
bool
hash_gciter_next(struct hash_gciter *it, struct hash_gciter_data *data)
{
Hmap *h;
uintptr bucket, oldbucket;
Bucket *b, *oldb;
uintptr i;
byte *k, *v;
h = it->h;
bucket = it->bucket;
b = it->b;
i = it->i;
data->st = nil;
data->key_data = nil;
data->val_data = nil;
data->indirectkey = (h->flags & IndirectKey) != 0;
data->indirectval = (h->flags & IndirectValue) != 0;
next:
switch (it->phase) {
case PHASE_BUCKETS:
if(b != nil) {
k = b->data + h->keysize * i;
v = b->data + h->keysize * BUCKETSIZE + h->valuesize * i;
for(; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
if(b->tophash[i] != 0) {
data->key_data = k;
data->val_data = v;
it->bucket = bucket;
it->b = b;
it->i = i + 1;
return true;
}
}
b = b->overflow;
if(b != nil) {
data->st = (byte*)b;
it->bucket = bucket;
it->b = b;
it->i = 0;
return true;
}
}
while(bucket < ((uintptr)1 << h->B)) {
if(h->oldbuckets != nil) {
oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
oldb = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
if(!evacuated(oldb)) {
// new bucket isn't valid yet
bucket++;
continue;
}
}
b = (Bucket*)(h->buckets + bucket * h->bucketsize);
i = 0;
bucket++;
goto next;
}
it->phase = PHASE_OLD_BUCKETS;
bucket = 0;
b = nil;
goto next;
case PHASE_OLD_BUCKETS:
if(h->oldbuckets == nil) {
it->phase = PHASE_TABLE;
goto next;
}
if(b != nil) {
k = b->data + h->keysize * i;
v = b->data + h->keysize * BUCKETSIZE + h->valuesize * i;
for(; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
if(b->tophash[i] != 0) {
data->key_data = k;
data->val_data = v;
it->bucket = bucket;
it->b = b;
it->i = i + 1;
return true;
}
}
b = overflowptr(b);
if(b != nil) {
data->st = (byte*)b;
it->bucket = bucket;
it->b = b;
it->i = 0;
return true;
}
}
if(bucket < ((uintptr)1 << (h->B - 1))) {
b = (Bucket*)(h->oldbuckets + bucket * h->bucketsize);
bucket++;
i = 0;
goto next;
}
it->phase = PHASE_TABLE;
goto next;
case PHASE_TABLE:
it->phase = PHASE_OLD_TABLE;
data->st = h->buckets;
return true;
case PHASE_OLD_TABLE:
it->phase = PHASE_DONE;
if(h->oldbuckets != nil) {
data->st = h->oldbuckets;
return true;
} else {
goto next;
}
}
if(it->phase != PHASE_DONE)
runtime·throw("bad phase at done");
return false;
}
//
/// interfaces to go runtime
//
@@ -1123,7 +929,7 @@ runtime·makemap_c(MapType *typ, int64 hint)
if(key->alg->hash == runtime·nohash)
runtime·throw("runtime.makemap: unsupported map key type");
h = runtime·mallocgc(sizeof(*h), (uintptr)typ | TypeInfo_Map, 0);
h = runtime·mallocgc(sizeof(*h), (uintptr)typ->hmap, 0);
hash_init(typ, h, hint);
// these calculations are compiler dependent.
@@ -1387,26 +1193,6 @@ runtime·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
void
reflect·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
{
uint32 old, new;
if(h != nil && t->key->size > sizeof(void*)) {
// reflect·mapiterkey returns pointers to key data,
// and reflect holds them, so we cannot free key data
// eagerly anymore.
// Can run concurrently with another reflect·mapiterinit() and with hash_iter_init().
for(;;) {
old = h->flags;
if(old & IndirectKey)
new = old & ~CanFreeKey;
else
new = old & ~CanFreeBucket;
if(new == old)
break;
if(runtime·cas(&h->flags, old, new))
break;
}
}
it = runtime·mal(sizeof *it);
FLUSH(&it);
runtime·mapiterinit(t, h, it);

View File

@@ -2,32 +2,3 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
struct Hmap; /* opaque */
/* Used by the garbage collector */
struct hash_gciter
{
Hmap *h;
int32 phase;
uintptr bucket;
struct Bucket *b;
uintptr i;
};
// this data is used by the garbage collector to keep the map's
// internal structures from being reclaimed. The iterator must
// return in st every live object (ones returned by mallocgc) so
// that those objects won't be collected, and it must return
// every key & value in key_data/val_data so they can get scanned
// for pointers they point to. Note that if you malloc storage
// for keys and values, you need to do both.
struct hash_gciter_data
{
uint8 *st; /* internal structure, or nil */
uint8 *key_data; /* key data, or nil */
uint8 *val_data; /* value data, or nil */
bool indirectkey; /* storing pointers to keys */
bool indirectval; /* storing pointers to values */
};
bool hash_gciter_init (struct Hmap *h, struct hash_gciter *it);
bool hash_gciter_next (struct hash_gciter *it, struct hash_gciter_data *data);

View File

@@ -487,8 +487,7 @@ enum
{
TypeInfo_SingleObject = 0,
TypeInfo_Array = 1,
TypeInfo_Map = 2,
TypeInfo_Chan = 3,
TypeInfo_Chan = 2,
// Enables type information at the end of blocks allocated from heap
DebugTypeAtBlockEnd = 0,
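These TypeInfo_* tags travel in the low bits of the type pointer handed to runtime·mallocgc, which is why calls like mallocgc(size, (uintptr)t->bucket | TypeInfo_Array, 0) elsewhere in this CL work: Type descriptors are word-aligned, leaving the low bits free. A hedged Go sketch of the encoding (the exact mask width is an assumption for illustration; the runtime masks by pointer alignment):

package sketch

const (
	TypeInfo_SingleObject = 0
	TypeInfo_Array        = 1
	TypeInfo_Chan         = 2
)

// pack tags a word-aligned type pointer with its TypeInfo kind.
func pack(typ, kind uintptr) uintptr { return typ | kind }

// unpack splits the combined word back into pointer and kind.
func unpack(ti uintptr) (typ, kind uintptr) {
	const mask = 8 - 1 // low bits free on a 64-bit platform
	return ti &^ mask, ti & mask
}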

View File

@@ -176,7 +176,6 @@ static struct {
enum {
GC_DEFAULT_PTR = GC_NUM_INSTR,
GC_MAP_NEXT,
GC_CHAN,
GC_NUM_INSTR2
@@ -580,9 +579,6 @@ flushobjbuf(Obj *objbuf, Obj **objbufpos, Obj **_wp, Workbuf **_wbuf, uintptr *_
// Program that scans the whole block and treats every block element as a potential pointer
static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
// Hashmap iterator program
static uintptr mapProg[2] = {0, GC_MAP_NEXT};
// Hchan program
static uintptr chanProg[2] = {0, GC_CHAN};
@@ -622,8 +618,11 @@ checkptr(void *obj, uintptr objti)
}
tisize = *(uintptr*)objti;
// Sanity check for object size: it should fit into the memory block.
if((byte*)obj + tisize > objstart + s->elemsize)
if((byte*)obj + tisize > objstart + s->elemsize) {
runtime·printf("object of type '%S' at %p/%p does not fit in block %p/%p\n",
*t->string, obj, tisize, objstart, s->elemsize);
runtime·throw("invalid gc type info");
}
if(obj != objstart)
return;
// If obj points to the beginning of the memory block,
@@ -639,7 +638,7 @@ checkptr(void *obj, uintptr objti)
for(j = 1; pc1[j] != GC_END && pc2[j] != GC_END; j++) {
if(pc1[j] != pc2[j]) {
runtime·printf("invalid gc type info for '%s' at %p, type info %p, block info %p\n",
t->string ? (int8*)t->string->str : (int8*)"?", j, pc1[j], pc2[j]);
t->string ? (int8*)t->string->str : (int8*)"?", j, pc1[j], pc2[j]);
runtime·throw("invalid gc type info");
}
}
@@ -662,7 +661,7 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
byte *b, *arena_start, *arena_used;
uintptr n, i, end_b, elemsize, size, ti, objti, count, type;
uintptr *pc, precise_type, nominal_size;
uintptr *map_ret, mapkey_size, mapval_size, mapkey_ti, mapval_ti, *chan_ret, chancap;
uintptr *chan_ret, chancap;
void *obj;
Type *t;
Slice *sliceptr;
@@ -672,11 +671,6 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
Obj *objbuf, *objbuf_end, *objbufpos;
Eface *eface;
Iface *iface;
Hmap *hmap;
MapType *maptype;
bool mapkey_kind, mapval_kind;
struct hash_gciter map_iter;
struct hash_gciter_data d;
Hchan *chan;
ChanType *chantype;
@@ -705,10 +699,6 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
objbufpos = objbuf;
// (Silence the compiler)
map_ret = nil;
mapkey_size = mapval_size = 0;
mapkey_kind = mapval_kind = false;
mapkey_ti = mapval_ti = 0;
chan = nil;
chantype = nil;
chan_ret = nil;
@@ -777,23 +767,6 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
stack_top.elemsize = pc[0];
stack_top.loop_or_ret = pc+1;
break;
case TypeInfo_Map:
hmap = (Hmap*)b;
maptype = (MapType*)t;
if(hash_gciter_init(hmap, &map_iter)) {
mapkey_size = maptype->key->size;
mapkey_kind = maptype->key->kind;
mapkey_ti = (uintptr)maptype->key->gc | PRECISE;
mapval_size = maptype->elem->size;
mapval_kind = maptype->elem->kind;
mapval_ti = (uintptr)maptype->elem->gc | PRECISE;
map_ret = nil;
pc = mapProg;
} else {
goto next_block;
}
break;
case TypeInfo_Chan:
chan = (Hchan*)b;
chantype = (ChanType*)t;
@@ -994,77 +967,6 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
pc = (uintptr*)((byte*)pc + *(int32*)(pc+2)); // target of the CALL instruction
continue;
case GC_MAP_PTR:
hmap = *(Hmap**)(stack_top.b + pc[1]);
if(hmap == nil) {
pc += 3;
continue;
}
if(markonly(hmap)) {
maptype = (MapType*)pc[2];
if(hash_gciter_init(hmap, &map_iter)) {
mapkey_size = maptype->key->size;
mapkey_kind = maptype->key->kind;
mapkey_ti = (uintptr)maptype->key->gc | PRECISE;
mapval_size = maptype->elem->size;
mapval_kind = maptype->elem->kind;
mapval_ti = (uintptr)maptype->elem->gc | PRECISE;
// Start mapProg.
map_ret = pc+3;
pc = mapProg+1;
} else {
pc += 3;
}
} else {
pc += 3;
}
continue;
case GC_MAP_NEXT:
// Add all keys and values to buffers, mark all subtables.
while(hash_gciter_next(&map_iter, &d)) {
// buffers: reserve space for 2 objects.
if(ptrbufpos+2 >= ptrbuf_end)
flushptrbuf(ptrbuf, &ptrbufpos, &wp, &wbuf, &nobj);
if(objbufpos+2 >= objbuf_end)
flushobjbuf(objbuf, &objbufpos, &wp, &wbuf, &nobj);
if(d.st != nil)
markonly(d.st);
if(d.key_data != nil) {
if(!(mapkey_kind & KindNoPointers) || d.indirectkey) {
if(!d.indirectkey)
*objbufpos++ = (Obj){d.key_data, mapkey_size, mapkey_ti};
else {
if(Debug) {
obj = *(void**)d.key_data;
if(!(arena_start <= obj && obj < arena_used))
runtime·throw("scanblock: inconsistent hashmap");
}
*ptrbufpos++ = (PtrTarget){*(void**)d.key_data, mapkey_ti};
}
}
if(!(mapval_kind & KindNoPointers) || d.indirectval) {
if(!d.indirectval)
*objbufpos++ = (Obj){d.val_data, mapval_size, mapval_ti};
else {
if(Debug) {
obj = *(void**)d.val_data;
if(!(arena_start <= obj && obj < arena_used))
runtime·throw("scanblock: inconsistent hashmap");
}
*ptrbufpos++ = (PtrTarget){*(void**)d.val_data, mapval_ti};
}
}
}
}
if(map_ret == nil)
goto next_block;
pc = map_ret;
continue;
case GC_REGION:
obj = (void*)(stack_top.b + pc[1]);
size = pc[2];
@@ -1077,7 +979,6 @@ scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
continue;
case GC_CHAN_PTR:
// Similar to GC_MAP_PTR
chan = *(Hchan**)(stack_top.b + pc[1]);
if(chan == nil) {
pc += 3;

View File

@@ -26,7 +26,6 @@ enum {
GC_ARRAY_START, // Start an array with a fixed length. Args: (off, len, elemsize)
GC_ARRAY_NEXT, // The next element of an array. Args: none
GC_CALL, // Call a subroutine. Args: (off, objgcrel)
GC_MAP_PTR, // Go map. Args: (off, MapType*)
GC_CHAN_PTR, // Go channel. Args: (off, ChanType*)
GC_STRING, // Go string. Args: (off)
GC_EFACE, // interface{}. Args: (off)

View File

@@ -70,6 +70,8 @@ struct MapType
Type;
Type *key;
Type *elem;
Type *bucket; // internal type representing a hash bucket
Type *hmap; // internal type representing a Hmap
};
struct ChanType