go/src/runtime/hash64.go
commit 91059de095 by Keith Randall
runtime: make aeshash more DOS-proof
Improve the aeshash implementation to make it harder to engineer collisions.

1) Scramble the seed before xoring with the input string.  This
   makes it harder to cancel known portions of the seed (like the size)
   because it mixes the per-table seed into those other parts.

2) Use table-dependent seeds for all stripes when hashing >16 byte strings.

For small strings this change uses 4 aesenc ops instead of 3, so it
is somewhat slower.  The first two can run in parallel, though, so
it isn't 33% slower.
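
The seed scrambling in 1) can be sketched at the Go level. The names below (mix, hashOld, hashNew) are illustrative only, and mix is a multiply/xor stand-in for an aesenc round; the real aeshash is assembly and produces different values, so this is only a conceptual sketch of why scrambling first helps.

// Conceptual sketch; not the runtime's aeshash.
package main

import "fmt"

// mix is a stand-in for one scrambling round (the real code uses aesenc).
func mix(x uint64) uint64 {
	x ^= x >> 33
	x *= 0xff51afd7ed558ccd
	x ^= x >> 33
	return x
}

// Old scheme (simplified): the raw seed and size are xored straight into
// the first input word, so an attacker who controls the input can cancel
// the known size component.
func hashOld(seed, size, word uint64) uint64 {
	return mix((seed + size) ^ word)
}

// New scheme (simplified): scramble seed+size first, so the known size is
// already diffused through the per-table seed before the attacker-controlled
// input is folded in.
func hashNew(seed, size, word uint64) uint64 {
	return mix(mix(seed+size) ^ word)
}

func main() {
	fmt.Printf("old %#x  new %#x\n", hashOld(1, 8, 0xdeadbeef), hashNew(1, 8, 0xdeadbeef))
}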

benchmark                            old ns/op     new ns/op     delta
BenchmarkHash64-12                   10.2          11.2          +9.80%
BenchmarkHash16-12                   5.71          6.13          +7.36%
BenchmarkHash5-12                    6.64          7.01          +5.57%
BenchmarkHashBytesSpeed-12           30.3          31.9          +5.28%
BenchmarkHash65536-12                2785          2882          +3.48%
BenchmarkHash1024-12                 53.6          55.4          +3.36%
BenchmarkHashStringArraySpeed-12     54.9          56.5          +2.91%
BenchmarkHashStringSpeed-12          18.7          19.2          +2.67%
BenchmarkHashInt32Speed-12           14.8          15.1          +2.03%
BenchmarkHashInt64Speed-12           14.5          14.5          +0.00%

Change-Id: I59ea124b5cb92b1c7e8584008257347f9049996c
Reviewed-on: https://go-review.googlesource.com/14124
Reviewed-by: jcd . <jcd@golang.org>
Run-TryBot: Keith Randall <khr@golang.org>
TryBot-Result: Gobot Gobot <gobot@golang.org>
2015-10-08 16:43:03 +00:00


// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Hashing algorithm inspired by
// xxhash: https://code.google.com/p/xxhash/
// cityhash: https://code.google.com/p/cityhash/

// +build amd64 amd64p32 arm64 ppc64 ppc64le

package runtime

import "unsafe"

const (
	// Constants for multiplication: four random odd 64-bit numbers.
	m1 = 16877499708836156737
	m2 = 2820277070424839065
	m3 = 9497967016996688599
	m4 = 15839092249703872147
)

func memhash(p unsafe.Pointer, seed, s uintptr) uintptr {
	if GOARCH == "amd64" && GOOS != "nacl" && useAeshash {
		return aeshash(p, seed, s)
	}
	h := uint64(seed + s*hashkey[0])
tail:
	switch {
	case s == 0:
	case s < 4:
		h ^= uint64(*(*byte)(p))
		h ^= uint64(*(*byte)(add(p, s>>1))) << 8
		h ^= uint64(*(*byte)(add(p, s-1))) << 16
		h = rotl_31(h*m1) * m2
	case s <= 8:
		h ^= uint64(readUnaligned32(p))
		h ^= uint64(readUnaligned32(add(p, s-4))) << 32
		h = rotl_31(h*m1) * m2
	case s <= 16:
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	case s <= 32:
		h ^= readUnaligned64(p)
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, 8))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-16))
		h = rotl_31(h*m1) * m2
		h ^= readUnaligned64(add(p, s-8))
		h = rotl_31(h*m1) * m2
	default:
		v1 := h
		v2 := uint64(seed * hashkey[1])
		v3 := uint64(seed * hashkey[2])
		v4 := uint64(seed * hashkey[3])
		for s >= 32 {
			v1 ^= readUnaligned64(p)
			v1 = rotl_31(v1*m1) * m2
			p = add(p, 8)
			v2 ^= readUnaligned64(p)
			v2 = rotl_31(v2*m2) * m3
			p = add(p, 8)
			v3 ^= readUnaligned64(p)
			v3 = rotl_31(v3*m3) * m4
			p = add(p, 8)
			v4 ^= readUnaligned64(p)
			v4 = rotl_31(v4*m4) * m1
			p = add(p, 8)
			s -= 32
		}
		h = v1 ^ v2 ^ v3 ^ v4
		goto tail
	}
	h ^= h >> 29
	h *= m3
	h ^= h >> 32
	return uintptr(h)
}

// Note: in order to get the compiler to issue rotl instructions, we
// need to constant fold the shift amount by hand.
// TODO: convince the compiler to issue rotl instructions after inlining.
func rotl_31(x uint64) uint64 {
	return (x << 31) | (x >> (64 - 31))
}