
internal/runtime/maps: optimize small map lookups with int keys

Load the field we need from the type once, outside the search loop.
Get rid of the multiply used to compute the slot position; instead,
compute the slot position incrementally using addition. Move the
hashing below the small-map check in access1 and access2, so lookups
in small maps skip hashing entirely.

Based on khr@'s CL 618959.

For #54766.

Cq-Include-Trybots: luci.golang.try:gotip-linux-amd64-longtest-swissmap
Change-Id: Id11b5479fa5bc0130a1d8d9e664d0206d24942ea
Reviewed-on: https://go-review.googlesource.com/c/go/+/620217
Reviewed-by: Keith Randall <khr@golang.org>
Reviewed-by: Keith Randall <khr@google.com>
LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
Auto-Submit: Michael Pratt <mpratt@google.com>
Michael Pratt authored on 2024-10-14 15:08:27 -04:00; committed by Gopher Robot
parent 0b652e3ef6
commit d51f92e737
2 changed files with 55 additions and 80 deletions
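
The heart of the change, visible in both files below, is a classic strength reduction: instead of recomputing each slot's address as base + i*SlotSize (one multiply per probed slot), the new loops carry the slot pointer through the iteration and advance it by the slot size with an add. Here is a minimal standalone sketch of the pattern, using a plain uint32 array in place of the runtime's group storage; findKeyMul, findKeyAdd, and the constants are invented for illustration.

package main

import (
	"fmt"
	"unsafe"
)

const (
	groupSlots = 8                        // mirrors abi.SwissMapGroupSlots
	slotSize   = unsafe.Sizeof(uint32(0)) // stand-in for typ.SlotSize
)

// findKeyMul shows the old shape: slot addresses are derived from the
// loop index, costing a multiply on every probe.
func findKeyMul(data unsafe.Pointer, key uint32) (int, bool) {
	for i := uintptr(0); i < groupSlots; i++ {
		slotKey := unsafe.Pointer(uintptr(data) + i*slotSize) // multiply
		if key == *(*uint32)(slotKey) {
			return int(i), true
		}
	}
	return 0, false
}

// findKeyAdd shows the new shape: the slot pointer is bumped by
// addition in the loop's post statement, analogous to the CL's
// for-loop header.
func findKeyAdd(data unsafe.Pointer, key uint32) (int, bool) {
	for i, slotKey := uintptr(0), data; i < groupSlots; i, slotKey = i+1, unsafe.Add(slotKey, slotSize) {
		if key == *(*uint32)(slotKey) {
			return int(i), true
		}
	}
	return 0, false
}

func main() {
	keys := [groupSlots]uint32{100, 101, 102, 103, 104, 105, 106, 107}
	data := unsafe.Pointer(&keys[0])
	fmt.Println(findKeyMul(data, 103)) // 3 true
	fmt.Println(findKeyAdd(data, 103)) // 3 true
}

The CL pairs the same trick with hoisting typ.SlotSize into a local before the loop, so the type descriptor is read once rather than on every iteration.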

src/internal/runtime/maps/runtime_fast32_swiss.go

@@ -13,32 +13,6 @@ import (
 	"unsafe"
 )
 
-func (m *Map) getWithoutKeySmallFast32(typ *abi.SwissMapType, hash uintptr, key uint32) (unsafe.Pointer, bool) {
-	g := groupReference{
-		data: m.dirPtr,
-	}
-
-	h2 := uint8(h2(hash))
-	ctrls := *g.ctrls()
-
-	for i := uintptr(0); i < 8; i++ {
-		c := uint8(ctrls)
-		ctrls >>= 8
-		if c != h2 {
-			continue
-		}
-
-		slotKey := g.key(typ, i)
-
-		if key == *(*uint32)(slotKey) {
-			slotElem := g.elem(typ, i)
-			return slotElem, true
-		}
-	}
-
-	return nil, false
-}
-
 //go:linkname runtime_mapaccess1_fast32 runtime.mapaccess1_fast32
 func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe.Pointer {
 	if race.Enabled && m != nil {
@@ -55,16 +29,23 @@ func runtime_mapaccess1_fast32(typ *abi.SwissMapType, m *Map, key uint32) unsafe
 		fatal("concurrent map read and map write")
 	}
 
-	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
-
-	if m.dirLen <= 0 {
-		elem, ok := m.getWithoutKeySmallFast32(typ, hash, key)
-		if !ok {
-			return unsafe.Pointer(&zeroVal[0])
+	if m.dirLen == 0 {
+		g := groupReference{
+			data: m.dirPtr,
 		}
-		return elem
+
+		slotSize := typ.SlotSize
+		for i, slotKey := uintptr(0), g.key(typ, 0); i < abi.SwissMapGroupSlots; i, slotKey = i+1, unsafe.Pointer(uintptr(slotKey)+slotSize) {
+			if key == *(*uint32)(slotKey) && (g.ctrls().get(i)&(1<<7)) == 0 {
+				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
+				return slotElem
+			}
+		}
+		return unsafe.Pointer(&zeroVal[0])
 	}
 
+	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
+
 	// Select table.
 	idx := m.directoryIndex(hash)
 	t := m.directoryAt(idx)
@@ -112,16 +93,23 @@ func runtime_mapaccess2_fast32(typ *abi.SwissMapType, m *Map, key uint32) (unsaf
 		fatal("concurrent map read and map write")
 	}
 
-	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
-
-	if m.dirLen <= 0 {
-		elem, ok := m.getWithoutKeySmallFast32(typ, hash, key)
-		if !ok {
-			return unsafe.Pointer(&zeroVal[0]), false
+	if m.dirLen == 0 {
+		g := groupReference{
+			data: m.dirPtr,
 		}
-		return elem, true
+
+		slotSize := typ.SlotSize
+		for i, slotKey := uintptr(0), g.key(typ, 0); i < abi.SwissMapGroupSlots; i, slotKey = i+1, unsafe.Pointer(uintptr(slotKey)+slotSize) {
+			if key == *(*uint32)(slotKey) && (g.ctrls().get(i)&(1<<7)) == 0 {
+				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
+				return slotElem, true
+			}
+		}
+		return unsafe.Pointer(&zeroVal[0]), false
 	}
 
+	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
+
 	// Select table.
 	idx := m.directoryIndex(hash)
 	t := m.directoryAt(idx)
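
One subtlety before the fast64 file, which repeats the same change: the deleted helper filtered slots by comparing each control byte against h2(hash), and that comparison implicitly skipped empty and deleted slots, since those sentinels have the high bit set while h2 is a 7-bit value. The inlined loop compares keys directly, so it must check occupancy explicitly, which is what the (g.ctrls().get(i)&(1<<7)) == 0 test does. A toy sketch of the convention; the sentinel values below follow the usual Swiss-table layout and are assumptions for illustration, not quoted from the runtime.

package main

import "fmt"

// Swiss-table-style control bytes: a full slot stores a 7-bit hash
// fragment (h2) with the high bit clear, while the empty and deleted
// sentinels have the high bit set. Assumed values for this sketch.
const (
	ctrlEmpty   uint8 = 0b1000_0000
	ctrlDeleted uint8 = 0b1111_1110
)

// isFull reports whether a control byte marks an occupied slot,
// mirroring the high-bit test in the CL.
func isFull(c uint8) bool {
	return c&(1<<7) == 0
}

func main() {
	fmt.Println(isFull(ctrlEmpty))   // false: never a match
	fmt.Println(isFull(ctrlDeleted)) // false: tombstone
	fmt.Println(isFull(0x5c))        // true: slot holds some h2 value
}

Because && short-circuits, the control byte is only consulted on slots whose key bytes already match.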

src/internal/runtime/maps/runtime_fast64_swiss.go

@@ -13,32 +13,6 @@ import (
 	"unsafe"
 )
 
-func (m *Map) getWithoutKeySmallFast64(typ *abi.SwissMapType, hash uintptr, key uint64) (unsafe.Pointer, bool) {
-	g := groupReference{
-		data: m.dirPtr,
-	}
-
-	h2 := uint8(h2(hash))
-	ctrls := *g.ctrls()
-
-	for i := uintptr(0); i < 8; i++ {
-		c := uint8(ctrls)
-		ctrls >>= 8
-		if c != h2 {
-			continue
-		}
-
-		slotKey := g.key(typ, i)
-
-		if key == *(*uint64)(slotKey) {
-			slotElem := g.elem(typ, i)
-			return slotElem, true
-		}
-	}
-
-	return nil, false
-}
-
 //go:linkname runtime_mapaccess1_fast64 runtime.mapaccess1_fast64
 func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe.Pointer {
 	if race.Enabled && m != nil {
@@ -55,16 +29,23 @@ func runtime_mapaccess1_fast64(typ *abi.SwissMapType, m *Map, key uint64) unsafe
 		fatal("concurrent map read and map write")
 	}
 
-	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
-
-	if m.dirLen <= 0 {
-		elem, ok := m.getWithoutKeySmallFast64(typ, hash, key)
-		if !ok {
-			return unsafe.Pointer(&zeroVal[0])
+	if m.dirLen == 0 {
+		g := groupReference{
+			data: m.dirPtr,
 		}
-		return elem
+
+		slotSize := typ.SlotSize
+		for i, slotKey := uintptr(0), g.key(typ, 0); i < abi.SwissMapGroupSlots; i, slotKey = i+1, unsafe.Pointer(uintptr(slotKey)+slotSize) {
+			if key == *(*uint64)(slotKey) && (g.ctrls().get(i)&(1<<7)) == 0 {
+				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
+				return slotElem
+			}
+		}
+		return unsafe.Pointer(&zeroVal[0])
 	}
 
+	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
+
 	// Select table.
 	idx := m.directoryIndex(hash)
 	t := m.directoryAt(idx)
@@ -112,16 +93,22 @@ func runtime_mapaccess2_fast64(typ *abi.SwissMapType, m *Map, key uint64) (unsaf
 		fatal("concurrent map read and map write")
 	}
 
-	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
-
-	if m.dirLen <= 0 {
-		elem, ok := m.getWithoutKeySmallFast64(typ, hash, key)
-		if !ok {
-			return unsafe.Pointer(&zeroVal[0]), false
+	if m.dirLen == 0 {
+		g := groupReference{
+			data: m.dirPtr,
 		}
-		return elem, true
+
+		slotSize := typ.SlotSize
+		for i, slotKey := uintptr(0), g.key(typ, 0); i < abi.SwissMapGroupSlots; i, slotKey = i+1, unsafe.Pointer(uintptr(slotKey)+slotSize) {
+			if key == *(*uint64)(slotKey) && (g.ctrls().get(i)&(1<<7)) == 0 {
+				slotElem := unsafe.Pointer(uintptr(slotKey) + typ.ElemOff)
+				return slotElem, true
+			}
+		}
+		return unsafe.Pointer(&zeroVal[0]), false
 	}
 
+	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed)
 	// Select table.
 	idx := m.directoryIndex(hash)
 	t := m.directoryAt(idx)
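
Stepping back, the hash computation could move below the dirLen check because the small-map path now identifies a match purely by comparing key values: a lookup in a small map never hashes at all, and the hash is paid for only once a directory of tables must be selected. A self-contained sketch of that control flow with a toy map type; every name is invented, and the real code walks groups and tables rather than a built-in map.

package main

import "fmt"

const groupSlots = 8 // mirrors abi.SwissMapGroupSlots

// toyMap stands in for maps.Map: a single fixed-size group while the
// map is small, and a hash-indexed structure once it grows. All
// fields and methods are invented for this sketch.
type toyMap struct {
	keys  [groupSlots]uint64
	elems [groupSlots]string
	used  [groupSlots]bool
	big   map[uint64]string // nil while the map is small
}

// toyHash is a placeholder; the runtime calls typ.Hasher with the
// map's seed.
func toyHash(k uint64) uint64 {
	return k * 0x9e3779b97f4a7c15
}

func (m *toyMap) get(key uint64) (string, bool) {
	if m.big == nil { // the "dirLen == 0" fast path: no hashing at all
		for i := 0; i < groupSlots; i++ {
			// Key compared first, occupancy second, matching the
			// ordering in the CL's inlined loop.
			if m.keys[i] == key && m.used[i] {
				return m.elems[i], true
			}
		}
		return "", false
	}
	_ = toyHash(key) // hash computed only on the grown-map path
	v, ok := m.big[key]
	return v, ok
}

func main() {
	m := &toyMap{}
	m.keys[3], m.elems[3], m.used[3] = 42, "answer", true
	fmt.Println(m.get(42)) // answer true
	fmt.Println(m.get(7))  // false
}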