Mirror of https://github.com/golang/go, synced 2024-11-22 14:15:05 -07:00

undo CL 12840043 / 3b9f54db72a1

Breaks the build.  Old bucket arrays kept by iterators
still need to be scanned.

««« original CL description
runtime: tell GC not to scan internal hashmap structures.
We'll do it ourselves via hash_gciter, thanks.
Fixes bug 6119.

R=golang-dev, dvyukov, cookieo9, rsc
CC=golang-dev
https://golang.org/cl/12840043
»»»

R=golang-dev
CC=golang-dev
https://golang.org/cl/12884043
This commit is contained in:
Keith Randall 2013-08-13 12:59:39 -07:00
parent 0df438c683
commit 74e78df107

View File

@@ -259,10 +259,7 @@ hash_init(MapType *t, Hmap *h, uint32 hint)
 		// done lazily later.
 		buckets = nil;
 	} else {
-		buckets = runtime·mallocgc(bucketsize << B, 0, FlagNoZero | FlagNoPointers);
-		// Note: the array really does have pointers, but we tell the gc about
-		// them explicitly via gciter below. We use FlagNoPointers to prevent
-		// the gc from scanning the bucket array itself. Fixes issue 6119.
+		buckets = runtime·mallocgc(bucketsize << B, 0, FlagNoZero);
 		for(i = 0; i < (uintptr)1 << B; i++) {
 			b = (Bucket*)(buckets + i * bucketsize);
 			clearbucket(b);
@@ -333,7 +330,7 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
 			if((hash & newbit) == 0) {
 				if(xi == BUCKETSIZE) {
 					if(checkgc) mstats.next_gc = mstats.heap_alloc;
-					newx = runtime·mallocgc(h->bucketsize, 0, FlagNoZero | FlagNoPointers);
+					newx = runtime·mallocgc(h->bucketsize, 0, FlagNoZero);
 					clearbucket(newx);
 					x->overflow = newx;
 					x = newx;
@@ -358,7 +355,7 @@ evacuate(MapType *t, Hmap *h, uintptr oldbucket)
 			} else {
 				if(yi == BUCKETSIZE) {
 					if(checkgc) mstats.next_gc = mstats.heap_alloc;
-					newy = runtime·mallocgc(h->bucketsize, 0, FlagNoZero | FlagNoPointers);
+					newy = runtime·mallocgc(h->bucketsize, 0, FlagNoZero);
 					clearbucket(newy);
 					y->overflow = newy;
 					y = newy;
@@ -454,7 +451,7 @@ hash_grow(MapType *t, Hmap *h)
 	old_buckets = h->buckets;
 	// NOTE: this could be a big malloc, but since we don't need zeroing it is probably fast.
 	if(checkgc) mstats.next_gc = mstats.heap_alloc;
-	new_buckets = runtime·mallocgc((uintptr)h->bucketsize << (h->B + 1), 0, FlagNoZero | FlagNoPointers);
+	new_buckets = runtime·mallocgc((uintptr)h->bucketsize << (h->B + 1), 0, FlagNoZero);
 	flags = (h->flags & ~(Iterator | OldIterator));
 	if((h->flags & Iterator) != 0) {
 		flags |= OldIterator;
@@ -618,7 +615,7 @@ hash_insert(MapType *t, Hmap *h, void *key, void *value)
 	hash = h->hash0;
 	t->key->alg->hash(&hash, t->key->size, key);
 	if(h->buckets == nil) {
-		h->buckets = runtime·mallocgc(h->bucketsize, 0, FlagNoZero | FlagNoPointers);
+		h->buckets = runtime·mallocgc(h->bucketsize, 0, FlagNoZero);
 		b = (Bucket*)(h->buckets);
 		clearbucket(b);
 	}
@@ -668,7 +665,7 @@ hash_insert(MapType *t, Hmap *h, void *key, void *value)
 	if(inserti == nil) {
 		// all current buckets are full, allocate a new one.
 		if(checkgc) mstats.next_gc = mstats.heap_alloc;
-		newb = runtime·mallocgc(h->bucketsize, 0, FlagNoZero | FlagNoPointers);
+		newb = runtime·mallocgc(h->bucketsize, 0, FlagNoZero);
 		clearbucket(newb);
 		b->overflow = newb;
 		inserti = newb->tophash;