From 3c182a12f772c51e93195f2efaf84e3aff69109e Mon Sep 17 00:00:00 2001
From: Josh Bleecher Snyder
Date: Fri, 18 Aug 2017 21:15:44 -0700
Subject: [PATCH] runtime: replace t.keysize with fixed key size in
 evacuate_fastX
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Change-Id: I89c3c3b21d7a4acbc49b14a52ac8d9a5861c0c39
Reviewed-on: https://go-review.googlesource.com/59131
Run-TryBot: Josh Bleecher Snyder
TryBot-Result: Gobot Gobot
Reviewed-by: Keith Randall
Reviewed-by: Martin Möhrmann
---
 src/runtime/hashmap_fast.go | 36 ++++++++++++++++++------------------
 1 file changed, 18 insertions(+), 18 deletions(-)

diff --git a/src/runtime/hashmap_fast.go b/src/runtime/hashmap_fast.go
index 32e7cff090..626b3531f5 100644
--- a/src/runtime/hashmap_fast.go
+++ b/src/runtime/hashmap_fast.go
@@ -795,7 +795,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 		x := &xy[0]
 		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
 		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+		x.v = add(x.k, bucketCnt*4)
 
 		if !h.sameSizeGrow() {
 			// Only calculate y pointers if we're growing bigger.
@@ -803,13 +803,13 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 			y := &xy[1]
 			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
 			y.k = add(unsafe.Pointer(y.b), dataOffset)
-			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+			y.v = add(y.k, bucketCnt*4)
 		}
 
 		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
-			v := add(k, bucketCnt*uintptr(t.keysize))
-			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+			v := add(k, bucketCnt*4)
+			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 4), add(v, uintptr(t.valuesize)) {
 				top := b.tophash[i]
 				if top == empty {
 					b.tophash[i] = evacuatedEmpty
@@ -855,7 +855,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 					dst.b = h.newoverflow(t, dst.b)
 					dst.i = 0
 					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+					dst.v = add(dst.k, bucketCnt*4)
 				}
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 				if t.indirectkey {
@@ -873,7 +873,7 @@ func evacuate_fast32(t *maptype, h *hmap, oldbucket uintptr) {
 				// key or value arrays. That's ok, as we have the overflow pointer
 				// at the end of the bucket to protect against pointing past the
 				// end of the bucket.
-				dst.k = add(dst.k, uintptr(t.keysize))
+				dst.k = add(dst.k, 4)
 				dst.v = add(dst.v, uintptr(t.valuesize))
 			}
 		}
@@ -916,7 +916,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 		x := &xy[0]
 		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
 		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+		x.v = add(x.k, bucketCnt*8)
 
 		if !h.sameSizeGrow() {
 			// Only calculate y pointers if we're growing bigger.
@@ -924,13 +924,13 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 			y := &xy[1]
 			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
 			y.k = add(unsafe.Pointer(y.b), dataOffset)
-			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+			y.v = add(y.k, bucketCnt*8)
 		}
 
 		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
-			v := add(k, bucketCnt*uintptr(t.keysize))
-			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+			v := add(k, bucketCnt*8)
+			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 8), add(v, uintptr(t.valuesize)) {
 				top := b.tophash[i]
 				if top == empty {
 					b.tophash[i] = evacuatedEmpty
@@ -976,7 +976,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 					dst.b = h.newoverflow(t, dst.b)
 					dst.i = 0
 					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+					dst.v = add(dst.k, bucketCnt*8)
 				}
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 				if t.indirectkey {
@@ -994,7 +994,7 @@ func evacuate_fast64(t *maptype, h *hmap, oldbucket uintptr) {
 				// key or value arrays. That's ok, as we have the overflow pointer
 				// at the end of the bucket to protect against pointing past the
 				// end of the bucket.
-				dst.k = add(dst.k, uintptr(t.keysize))
+				dst.k = add(dst.k, 8)
 				dst.v = add(dst.v, uintptr(t.valuesize))
 			}
 		}
@@ -1037,7 +1037,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 		x := &xy[0]
 		x.b = (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
 		x.k = add(unsafe.Pointer(x.b), dataOffset)
-		x.v = add(x.k, bucketCnt*uintptr(t.keysize))
+		x.v = add(x.k, bucketCnt*2*sys.PtrSize)
 
 		if !h.sameSizeGrow() {
 			// Only calculate y pointers if we're growing bigger.
@@ -1045,13 +1045,13 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 			y := &xy[1]
 			y.b = (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
 			y.k = add(unsafe.Pointer(y.b), dataOffset)
-			y.v = add(y.k, bucketCnt*uintptr(t.keysize))
+			y.v = add(y.k, bucketCnt*2*sys.PtrSize)
 		}
 
 		for ; b != nil; b = b.overflow(t) {
 			k := add(unsafe.Pointer(b), dataOffset)
-			v := add(k, bucketCnt*uintptr(t.keysize))
-			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
+			v := add(k, bucketCnt*2*sys.PtrSize)
+			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, 2*sys.PtrSize), add(v, uintptr(t.valuesize)) {
 				top := b.tophash[i]
 				if top == empty {
 					b.tophash[i] = evacuatedEmpty
@@ -1097,7 +1097,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 					dst.b = h.newoverflow(t, dst.b)
 					dst.i = 0
 					dst.k = add(unsafe.Pointer(dst.b), dataOffset)
-					dst.v = add(dst.k, bucketCnt*uintptr(t.keysize))
+					dst.v = add(dst.k, bucketCnt*2*sys.PtrSize)
 				}
 				dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
 				if t.indirectkey {
@@ -1115,7 +1115,7 @@ func evacuate_faststr(t *maptype, h *hmap, oldbucket uintptr) {
 				// key or value arrays. That's ok, as we have the overflow pointer
 				// at the end of the bucket to protect against pointing past the
 				// end of the bucket.
-				dst.k = add(dst.k, uintptr(t.keysize))
+				dst.k = add(dst.k, 2*sys.PtrSize)
 				dst.v = add(dst.v, uintptr(t.valuesize))
 			}
 		}
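Background on why the substitution is safe: each evacuate_fastX variant runs only for one key type whose size is a compile-time constant, so t.keysize is always 4 in evacuate_fast32 (uint32 keys), 8 in evacuate_fast64 (uint64 keys), and 2*sys.PtrSize in evacuate_faststr (a string header: pointer plus length). A bucket stores all tophash bytes, then all keys, then all values, so the value array begins exactly bucketCnt*keysize bytes after the key array. The following is a minimal standalone Go sketch of that arithmetic for the fast32 case, not runtime code: toyBucket is a hypothetical stand-in for the runtime's bmap (its real dataOffset and overflow pointer are omitted).

package main

import (
	"fmt"
	"unsafe"
)

const bucketCnt = 8 // entries per bucket, as in the runtime

// toyBucket mimics the bucket memory layout assumed above:
// tophash array, then the key array, then the value array.
type toyBucket struct {
	tophash [bucketCnt]uint8
	keys    [bucketCnt]uint32 // fast32: every key is exactly 4 bytes
	values  [bucketCnt]uint64 // value size still varies, hence t.valuesize stays
}

// add is the runtime's unsafe pointer-arithmetic helper.
func add(p unsafe.Pointer, x uintptr) unsafe.Pointer {
	return unsafe.Pointer(uintptr(p) + x)
}

func main() {
	var b toyBucket
	dataOffset := unsafe.Offsetof(b.keys) // where the key array begins

	k := add(unsafe.Pointer(&b), dataOffset)
	// The patched expression: bucketCnt*4 replaces bucketCnt*uintptr(t.keysize).
	v := add(k, bucketCnt*4)

	fmt.Println(v == unsafe.Pointer(&b.values[0])) // true: v is the first value slot
}

The same reasoning yields bucketCnt*8 for fast64 and bucketCnt*2*sys.PtrSize for faststr, and presumably lets the compiler fold the constant into the per-iteration pointer bumps (add(k, 4) and friends) instead of reloading t.keysize.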