Mirror of https://github.com/golang/go, synced 2024-11-19 00:04:40 -07:00
Commit d5e4c4061b

The equal algorithm used to take the size:

    equal(p, q *T, size uintptr) bool

With this change, it does not:

    equal(p, q *T) bool

Similarly for the hash algorithm.

The size is rarely used, as most equal functions know the size of the thing they are comparing. For instance, f32equal already knows its inputs are 4 bytes in size.

For cases where the size is not known, we allocate a closure (one for each size needed) that points to an assembly stub that reads the size out of the closure and calls generic code that has a size argument.

Reduces the size of the go binary by 0.07%. Performance impact is not measurable.

Change-Id: I6e00adf3dde7ad2974adbcff0ee91e86d2194fec
Reviewed-on: https://go-review.googlesource.com/2392
Reviewed-by: Russ Cox <rsc@golang.org>
90 lines · 2.0 KiB · Go
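The closure trick described in the commit message can be pictured with ordinary Go closures. The sketch below is only an illustration of the idea under modern Go (1.17+ for unsafe.Slice), not the runtime's actual mechanism: the real change uses an assembly stub that reads the size out of the closure word, and the names memequalGeneric and makeSizedEqual are invented for the example.

package main

import (
	"bytes"
	"fmt"
	"unsafe"
)

// memequalGeneric stands in for the generic comparison that still takes
// an explicit size, used when the size is not known statically.
func memequalGeneric(p, q unsafe.Pointer, size uintptr) bool {
	return bytes.Equal(unsafe.Slice((*byte)(p), size), unsafe.Slice((*byte)(q), size))
}

// makeSizedEqual plays the role of the per-size closure: it captures the
// size once, so callers see the new two-argument form equal(p, q) bool.
func makeSizedEqual(size uintptr) func(p, q unsafe.Pointer) bool {
	return func(p, q unsafe.Pointer) bool {
		return memequalGeneric(p, q, size)
	}
}

func main() {
	x := [6]byte{1, 2, 3, 4, 5, 6}
	y := [6]byte{1, 2, 3, 4, 5, 6}
	eq6 := makeSizedEqual(unsafe.Sizeof(x))                  // one closure per distinct size
	fmt.Println(eq6(unsafe.Pointer(&x), unsafe.Pointer(&y))) // true
}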
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
//   xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/

// +build 386 arm

package runtime

import "unsafe"

const (
	// Constants for multiplication: four random odd 32-bit numbers.
	m1 = 3168982561
	m2 = 3339683297
	m3 = 832293441
	m4 = 2336365089
)

func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	if GOARCH == "386" && GOOS != "nacl" && useAeshash {
		return aeshash(p, seed, s)
	}
	h := uint32(seed + s*hashkey[0])
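	// The switch at tail hashes the final 0-16 bytes. For longer inputs,
	// the default case folds the data 16 bytes at a time and then jumps
	// back here with the remaining 0-15 bytes.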
tail:
	switch {
	case s == 0:
	case s < 4:
		h ^= uint32(*(*byte)(p))
		h ^= uint32(*(*byte)(add(p, s>>1))) << 8
		h ^= uint32(*(*byte)(add(p, s-1))) << 16
		h = rotl_15(h*m1) * m2
	case s == 4:
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
	case s <= 8:
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-4))
		h = rotl_15(h*m1) * m2
	case s <= 16:
		h ^= readUnaligned32(p)
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, 4))
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-8))
		h = rotl_15(h*m1) * m2
		h ^= readUnaligned32(add(p, s-4))
		h = rotl_15(h*m1) * m2
	default:
		v1 := h
		v2 := uint32(hashkey[1])
		v3 := uint32(hashkey[2])
		v4 := uint32(hashkey[3])
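		// Bulk loop: consume 16 bytes per iteration, mixing four
		// consecutive 32-bit words into four lanes (v1, v2, v3, v4),
		// each with its own multiplier; the lanes are folded back
		// together after the loop.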
		for s >= 16 {
			v1 ^= readUnaligned32(p)
			v1 = rotl_15(v1*m1) * m2
			p = add(p, 4)
			v2 ^= readUnaligned32(p)
			v2 = rotl_15(v2*m2) * m3
			p = add(p, 4)
			v3 ^= readUnaligned32(p)
			v3 = rotl_15(v3*m3) * m4
			p = add(p, 4)
			v4 ^= readUnaligned32(p)
			v4 = rotl_15(v4*m4) * m1
			p = add(p, 4)
			s -= 16
		}
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}
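	// Finalizer: xor-shift and multiply rounds to avalanche the remaining
	// state through all 32 bits before returning.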
	h ^= h >> 17
	h *= m3
	h ^= h >> 13
	h *= m4
	h ^= h >> 16
	return uintptr(h)
}

// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
func rotl_15(x uint32) uint32 {
	return (x << 15) | (x >> (32 - 15))
}