Mirror of https://github.com/golang/go, synced 2024-11-19 16:24:45 -07:00
3bdc2f3abf
Currently we inline array comparisons for arrays with at most 4 elements.
Compare arrays with small size, but more than 4 elements (e.g. [16]byte)
with larger compares. This provides very slightly smaller binaries,
and results in faster code.

ArrayEqual-6  7.41ns ± 0%  3.17ns ± 0%  -57.15%  (p=0.000 n=10+10)

For go tool:
global text (code) = -559 bytes (-0.014566%)

This also helps mapaccess1_faststr, and maps in general:

MapDelete/Str/1-6        195ns ± 1%   186ns ± 2%   -4.47%  (p=0.000 n=10+10)
MapDelete/Str/2-6        211ns ± 1%   177ns ± 1%  -16.01%  (p=0.000 n=10+10)
MapDelete/Str/4-6        225ns ± 1%   183ns ± 1%  -18.49%  (p=0.000 n=8+10)
MapStringKeysEight_16-6  31.3ns ± 0%  28.6ns ± 0%   -8.63%  (p=0.000 n=6+9)
MapStringKeysEight_32-6  29.2ns ± 0%  27.6ns ± 0%   -5.45%  (p=0.000 n=10+10)
MapStringKeysEight_64-6  29.1ns ± 1%  27.5ns ± 0%   -5.46%  (p=0.000 n=10+10)
MapStringKeysEight_1M-6  29.1ns ± 1%  27.6ns ± 0%   -5.49%  (p=0.000 n=10+10)

Change-Id: I9ec98e41b233031e0e96c4e13d86a324f628ed4a
Reviewed-on: https://go-review.googlesource.com/40771
Run-TryBot: Ilya Tocar <ilya.tocar@intel.com>
TryBot-Result: Gobot Gobot <gobot@golang.org>
Reviewed-by: Keith Randall <khr@golang.org>
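For illustration only (this snippet is not part of the CL): the pattern the change targets is an equality test on a small array with more than 4 elements, such as [16]byte, which the compiler can now lower to a few wider compares instead of an element-by-element loop. The arrayEqual16 name below is made up for the example.

// Hypothetical example, not code from this CL: with this change the
// compiler compiles the == on [16]byte using larger compares instead of
// comparing the elements one at a time.
package main

import "fmt"

func arrayEqual16(a, b [16]byte) bool {
	return a == b
}

func main() {
	x := [16]byte{'h', 'e', 'l', 'l', 'o'}
	y := [16]byte{'h', 'e', 'l', 'l', 'o'}
	fmt.Println(arrayEqual16(x, y)) // true
}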
859 lines
23 KiB
Go
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
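// mapaccess1_fast32 is the lookup fast path for maps with 32-bit keys.
// It returns a pointer to the value for key, or a pointer to the shared
// zero value if key is not in the map; it never returns nil.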
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
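// mapaccess2_fast32 is like mapaccess1_fast32, but also reports whether
// the key was present in the map.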
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
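// mapaccess1_fast64 is the lookup fast path for maps with 64-bit keys.
// It returns a pointer to the value for key, or a pointer to the shared
// zero value if key is not in the map; it never returns nil.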
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
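// mapaccess2_fast64 is like mapaccess1_fast64, but also reports whether
// the key was present in the map.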
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			if !h.sameSizeGrow() {
				// There used to be half as many buckets; mask down one more power of two.
				m >>= 1
			}
			oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
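// mapaccess1_faststr is the lookup fast path for maps with string keys.
// For single-bucket maps it compares key lengths first and, for long keys,
// the first and last 4 bytes, before falling back to memequal or to hashing.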
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0])
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(&zeroVal[0])
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(&zeroVal[0])
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0])
		}
	}
}
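// mapaccess2_faststr is like mapaccess1_faststr, but also reports whether
// the key was present in the map.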
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(&zeroVal[0]), false
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map read and map write")
	}
	key := stringStructOf(&ky)
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(&zeroVal[0]), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
			if memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(&zeroVal[0]), false
	}
dohash:
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		if !h.sameSizeGrow() {
			// There used to be half as many buckets; mask down one more power of two.
			m >>= 1
		}
		oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(&zeroVal[0]), false
		}
	}
}
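// mapassign_fast32 is the insert/update fast path for maps with 32-bit keys.
// It returns a pointer to the value slot for key, growing the map if the
// load factor or overflow-bucket limits are hit; the caller stores the value.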
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*4)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*4)
	}

	// store new key/value at insert position
	*((*uint32)(insertk)) = key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}
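// mapassign_fast64 is the insert/update fast path for maps with 64-bit keys.
// It returns a pointer to the value slot for key, growing the map if the
// load factor or overflow-bucket limits are hit; the caller stores the value.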
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*8)
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
				}
				continue
			}
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*8)
	}

	// store new key/value at insert position
	*((*uint64)(insertk)) = key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}
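// mapassign_faststr is the insert/update fast path for maps with string keys.
// It returns a pointer to the value slot for key, growing the map if the
// load factor or overflow-bucket limits are hit; the caller stores the value.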
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if h == nil {
		panic(plainError("assignment to entry in nil map"))
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}
	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapassign.
	h.flags |= hashWriting

	if h.buckets == nil {
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	var inserti *uint8
	var insertk unsafe.Pointer
	var val unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			// already have a mapping for key. Update it.
			val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
			goto done
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Did not find mapping for key. Allocate new cell & add entry.

	// If we hit the max load factor or we have too many overflow buckets,
	// and we're not already in the middle of growing, start growing.
	if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
		hashGrow(t, h)
		goto again // Growing the table invalidates everything, so try again
	}

	if inserti == nil {
		// all current buckets are full, allocate a new one.
		newb := h.newoverflow(t, b)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		val = add(insertk, bucketCnt*2*sys.PtrSize)
	}

	// store new key/value at insert position
	*((*stringStruct)(insertk)) = *key
	*inserti = top
	h.count++

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
	return val
}
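// mapdelete_fast32 removes the entry for key, if present: the key slot is
// zeroed, the value is cleared with typedmemclr, the cell is marked empty,
// and the count is decremented.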
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))
			if key != *k {
				continue
			}
			*k = 0
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*4 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
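// mapdelete_fast64 removes the entry for key, if present: the key slot is
// zeroed, the value is cleared with typedmemclr, the cell is marked empty,
// and the count is decremented.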
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))
			if key != *k {
				continue
			}
			*k = 0
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*8 + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}
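// mapdelete_faststr removes the entry for key, if present: both the key and
// value slots are cleared with typedmemclr, the cell is marked empty, and
// the count is decremented.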
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
	}
	if h == nil || h.count == 0 {
		return
	}
	if h.flags&hashWriting != 0 {
		throw("concurrent map writes")
	}

	key := stringStructOf(&ky)
	hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))

	// Set hashWriting after calling alg.hash for consistency with mapdelete
	h.flags |= hashWriting

	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.growing() {
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	top := uint8(hash >> (sys.PtrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
			if k.len != key.len {
				continue
			}
			if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
				continue
			}
			typedmemclr(t.key, unsafe.Pointer(k))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.valuesize))
			typedmemclr(t.elem, v)
			b.tophash[i] = empty
			h.count--
			goto done
		}
		b = b.overflow(t)
		if b == nil {
			goto done
		}
	}

done:
	if h.flags&hashWriting == 0 {
		throw("concurrent map writes")
	}
	h.flags &^= hashWriting
}